Dataset columns: file_name (string, length 4-140), prefix (string, length 0-39k), suffix (string, length 0-36.1k), middle (string, length 0-29.4k), fim_type (4 classes: identifier_name, identifier_body, random_line_split, conditional_block).
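The prefix/suffix/middle split is the usual fill-in-the-middle (FIM) layout. As a minimal sketch of how one row could be reassembled into a training string — the sentinel token names and the ordering below are assumptions, not something this dump specifies:

```python
# Minimal sketch: rebuild a FIM training string from one dataset row.
# The sentinel tokens are placeholders; real tokenizers define their own.
def build_fim_sample(row: dict) -> str:
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

# Tiny example using the shape of the first row below (values truncated):
print(build_fim_sample({
    "file_name": "main.js",
    "prefix": "function p",
    "suffix": "){\n",
    "middle": "ositionSpeakers(",
}))
```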
main.js
// Dependencies
import Map from 'ol/map';
import View from 'ol/view';
import TileLayer from 'ol/layer/tile';
import OSM from 'ol/source/osm'
import Draw from 'ol/interaction/draw'
import VectorSource from 'ol/source/vector'
import VectorLayer from 'ol/layer/vector'
import ImageLayer from 'ol/layer/image'
import Proj from 'ol/proj' // fromLonLat
import Projection from 'ol/proj/projection'
import Select from 'ol/interaction/select'
import DragBox from 'ol/interaction/dragbox'
import Condition from 'ol/events/condition'
import Static from 'ol/source/imagestatic.js';
import Interaction from 'ol/interaction'
import Meyda from "meyda"
import ImageArcGISRest from 'ol/source/ImageArcGISRest';
import TileWMS from 'ol/source/TileWMS';
import TileArcGISRest from 'ol/source/tilearcgisrest'
// Local Imports
import Remote from './connectables/Remote.js'
import Speaker from './connectables/Speaker.js'
import Computation from './connectables/Computation.js'
import Connection from './connectables/Connection.js'
import SCClientWS from './web-socket/SCClientWS.js'
// NOTE - if you're getting an error like 'cosMap' undefined
// you need to change the src of one of meyda's depends:
// node_modules/dct/src/dct.js line:10, add 'var' before cosMap;
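// Illustrative sketch of that edit (not from the original; the exact line in
// node_modules/dct/src/dct.js may differ between dct versions):
//   before:  cosMap = null;
//   after:   var cosMap = null;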
SCClientWS.initSCClientWS();
var audienceSource = new VectorSource({wrapX: false});
var audienceLayer = new VectorLayer ({source:audienceSource});
var osm = new TileLayer({source: new OSM()})
var geo = new TileLayer({
source: new TileWMS({
url: 'https://ahocevar.com/geoserver/wms',
params: {
'LAYERS': 'ne:NE1_HR_LC_SR_W_DR',
'TILED': true
}
})
})
var highways = new ImageLayer({
source: new ImageArcGISRest({
ratio: 1,
params: {},
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_StateCityHighway_USA/MapServer'
})
})
var none = new ImageLayer({
source: new Static({
attributions: '© <a href="http://xkcd.com/license.html">xkcd</a>',
url: location.hostname+":"+location.port+'/performance-client/build/hyper-cloud.jpg',
projection: new Projection({
code: 'xkcd-image',
units: 'pixels',
extent: [0, 0, 2268, 4032]
}),
imageExtent: [0, 0, 2268, 4032]
})
})
var population = new TileLayer({
source: new TileArcGISRest({
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Population_World/MapServer'
})
})
var layers = {
none: none,
geo: geo,
osm: osm,
population: population,
highways: highways,
};
var map = new Map({
target: 'map',
layers: [none, audienceLayer],
view: new View({
center: Proj.fromLonLat([0,0]),
zoom: 2,
minResolution: 40075016.68557849 / 256 / Math.pow(2,7),
maxResolution: 40075016.68557849 / 256 / 4
})
});
var speakerCoordinateRatios = [[1/3,1],[2/3,1],[1,2/3],[1,1/3],[2/3,0],[1/3,0],[0,1/3],[0,2/3]];
for (var i in speakerCoordinateRatios){
new Speaker([0,0],audienceSource)
}
positionSpeakers()
Connection.connections.on(['add','remove'],function(){
var dag = Connection.getConnectionsDAG(); // [{from:..., to:...}] where from and to are from 'getGraphData'
var msg = {
type: "updateConnections",
value: dag
};
SCClientWS.send(msg);
})
// a normal select interaction to handle click
var select = new Select({
wrapX:false,
condition:function (e){
return (Condition.shiftKeyOnly(e) && Condition.singleClick(e))
}
});
// var selectedFeatures = select.getFeatures();
var dragBox = new DragBox({condition: Condition.platformModifierKeyOnly});
dragBox.on('boxend', function() {
// features that intersect the box are added to the collection
// selected features
var extent = dragBox.getGeometry().getExtent();
audienceSource.forEachFeatureIntersectingExtent(extent, function(feature) {
// selectedFeatures.push(feature);
select.getFeatures().push(feature);
});
});
// clear selection when drawing a new box and when clicking on the map
dragBox.on('boxstart', function() {
select.getFeatures().clear();
if (drawStart){
connectionDraw.finishDrawing();
};
// selectedFeatures.clear();
});
// MASTER controls
var master = document.getElementById('master');
var layerSelect = document.getElementById('layer-select')
for (var i in layers){
var option = document.createElement("option");
option.value = i;
option.innerHTML = i;
if(i == 'none'){option.selected = true}
layerSelect.appendChild(option);
}
layerSelect.onchange = function(){
var l = layers[layerSelect.value]
if (!l){console.log("Error: no layer named: "+layerSelect.value); return} else {
map.getLayers().clear();
map.addLayer(audienceLayer)
map.addLayer(l)
l.setZIndex(0);
audienceLayer.setZIndex(1)
}
}
var masterCorpus = ""
var corpusSelect = document.getElementById('corpus-select');
corpusSelect.onchange = function (){
masterCorpus = corpusSelect.value;
SCClientWS.send({type:"corpus",value:corpusSelect.value});
}
var cmdBox = document.getElementById('cmdBox');
select.getFeatures().on(['add', 'remove'], function() {
var innerHTML = select.getFeatures().getArray().filter(function(x){
return ["remote","computation"].includes(x.type)}).map(function(feature){
var r;
r = feature.getInfoHTML();
return r?r:document.createElement("div");
}
);
if (innerHTML.length>0){
cmdBox.hidden = false;
cmdBox.innerHTML = "";
for(var i in innerHTML){
cmdBox.appendChild(innerHTML[i])
}
} else {
cmdBox.hidden = true;
cmdBox.innerHTML = ""
}
});
map.addInteraction(dragBox);
map.addInteraction(select);
// Connection Interaction
function onConnectable(coordinate){
var features = audienceSource.getFeatures().map(function(f){return f.type})
var a = audienceSource.getFeaturesAtCoordinate(coordinate)
var isOnConnectable = a.length>0;
return isOnConnectable;
}
var connectionDraw = new Draw({
type:"LineString",
condition: function(browserEvent){
var shift = Condition.shiftKeyOnly(browserEvent);
var ctrl = Condition.platformModifierKeyOnly(browserEvent);
return !ctrl && !shift && onConnectable(browserEvent.coordinate)},
wrapX: false,
freehandCondition: function(x){return false},
freehand:false,
maxPoints:2
});
var from;
var drawStart = false;
connectionDraw.on('drawstart', function(ev){
drawStart = true;
var coord = ev.target.sketchCoords_[1];
var atCoord = audienceSource.getFeaturesAtCoordinate(coord);
if(atCoord && atCoord.length){
from = atCoord[0];
} else {
console.log("this condition should not have been activated, find this print message plz...")
// if nothing was found where the click happened, drawstart shouldn't have occurred
// (see connectionDraw's 'condition' function)
from = undefined;
connectionDraw.finishDrawing();
}
// TODO - multiple selection and connection?
// currentSelected = selectedFeatures.getArray();
// if(currentSelected.length<1){
// connectionDraw.finishDrawing();
// }
})
connectionDraw.on('drawend',function(ev){
drawStart = false;
var lineFeature = ev.feature;
var finalCoord = ev.target.sketchCoords_[1];
var to = audienceSource.getFeaturesAtCoordinate(finalCoord);
if(to && to.length){
to = to[0];
} else {
return;
}
if(from){
var success = from.connect(to);
if(!success){
console.log("...")
}
} else {
console.log("this condition shouldn't have been reached ...")
}
from = undefined;
})
map.addInteraction(connectionDraw);
// TODO - find smoother way of doing this
map.getView().on('change:resolution', resizeObjects);
map.getView().on('change',positionSpeakers);
function resizeObjects (){
resizeRemotes();
resizeComputations();
}
function resizeComputations(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Computation.computations){
Computation.computations[i].setRadius(radius);
}
}
function resizeRemotes(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Remote.remotes){
//TODO some error here, seems like remotes gets out of sync somehow...
Remote.remotes[i].getGeometry().setRadius(radius);
}
}
function positionSpeakers(){
var extent = map.getView().calculateExtent();
var resolution = map.getView().getResolution();
var radius = 40*resolution;
for (var i in Speaker.eightChannelSpeakerCoordinateRatios){
var x = speakerCoordinateRatios[i][0];
var y = speakerCoordinateRatios[i][1];
var coord = [(extent[2]-extent[0])*x+extent[0], (extent[3]-extent[1])*y+extent[1]];
// TODO - put these two into a speaker or Connectable method.
Speaker.speakers[i].coordinate = coord;
Speaker.speakers[i].getGeometry().setCenterAndRadius(coord, radius);
for (var j in Speaker.speakers[i].connections){
Speaker.speakers[i].connections[j].redraw();
}
}
}
map.getViewport().addEventListener('contextmenu', function (evt) {
evt.preventDefault();
var coordinate = map.getEventCoordinate(evt);
var resolution = map.getView().getResolution();
var radius = 15*resolution;
var c = new Computation(coordinate, audienceSource, radius)
SCClientWS.send({type:"newConnectable",value:c.getGraphData()});
// c.onComputationChange = function (){
c.onChange = function (){
SCClientWS.send({type:"updateConnectable", value:this.getGraphData()});
}
})
// global key mappings (hopefully these don't overwrite anything...)
var closureKeyUp = document.onkeyup;
document.onkeyup = function(e) {
// JIC something in openlayers sets something to document onkeyup
if(closureKeyUp){
closureKeyUp(e)
}
// esc key
if (e.key.toLowerCase() == "escape") { // escape key maps to keycode `27`
select.getFeatures().clear();
if(drawStart){
connectionDraw.finishDrawing()
};
} else if (e.key.toLowerCase() =="delete"){
var deletes = select.getFeatures().getArray();
// var deletes = selectedFeatures.getArray();
var deletedConnections = []
for (var i in deletes){
if (deletes[i].type =="computation"){
deletedConnections = deletedConnections.concat(deletes[i].connections);
var msg = {
type: "removeConnectable",
value: {uid: deletes[i].uid,type: deletes[i].type}
}
//Tell SC that computation is deleted
SCClientWS.send(msg);
deletes[i].delete();
// select.getFeatures().remove(deletes[i]);
} else if (deletes[i].type =="connection" && !deletedConnections.includes(deletes[i])){
deletes[i].delete();
}
}
select.getFeatures().clear();
}
}
var nodeServerWS;
try{
console.log("connecting via ws to: "+location.hostname+":"+location.port);
nodeServerWS = new WebSocket("ws://"+location.hostname+":"+location.port, 'echo-protocol');
} catch (e){
console.log("no WebSocket connection "+e)
}
if (nodeServerWS){
nodeServerWS.addEventListener('message', function(message){
var msg;
try {
// For some reason a single parse is leaving it as a string...
var msg = JSON.parse(message.data);
if (typeof(msg)== "string"){
msg = JSON.parse(msg);
}
} catch (e){
console.log("WARNING: could not parse ws JSON message")
console.log(msg);
}
console.log("msg type: "+msg.type)
if (msg.type == "params"){
updateRemoteParams(msg.value)
} else if (msg.type == "newRemote"){
console.log('new remote: '+msg.uid)
var remote = new Remote(msg.uid, Proj.fromLonLat(msg.coordinates), audienceSource);
var msg = {type:"subscribe", uid:msg.uid};
try{
nodeServerWS.send(JSON.stringify(msg))
} catch (e){
console.log("!!!!!ERROR couldn't sned subscribe request")
console.log(e);
}
// Tell SC a new remote
SCClientWS.send({type:"newConnectable",value:remote.getGraphData()})
// set onChange to tell SC when this remote changes
// remote.onRemoteChange = function (){
remote.onChange = function (){
// TODO @@@@ CONFIRM: I think 'this' refers to the remote here? if not need to change this
SCClientWS.send({type:"updateConnectable",value:this.getGraphData()})
}
} else if (msg.type == "removeRemote"){
try {
console.log('remove remote')
Remote.remotes[msg.uid].delete();
// audienceSource.removeFeature(Remote.remotes[msg.uid]);
SCClientWS.send({type:"removeConnectable",value:{type:"remote",uid:msg.uid}})
// delete Remote.remotes[msg.uid]
} catch (e){
console.log("WARNING: Error deleting remote <"+msg.uid+"> :" +e)
}
} else {
console.log("WARNING: WS message with unknown type <"+msg.type+"> received.")
}
})
}
// setTimeout(function(){
// // for making figures:
// var aa =new Remote(11, Proj.fromLonLat([43,-79]), audienceSource);
// var bb = new Remote(22, Proj.fromLonLat([50,-109]), audienceSource);
// var cc = new Remote(33, Proj.fromLonLat([60,43]), audienceSource);
// var dd = new Remote(44, Proj.fromLonLat([67,94]), audienceSource);
//
// aa.onRemoteChange = function (){}
// bb.onRemoteChange = function (){}
// cc.onRemoteChange = function (){}
// dd.onRemoteChange = function (){}
// },4000)
function updateRemoteParams(msg){
// TODO - @@@***%%% DANGER CHANGE THIS BACKs
msg.loudness = msg.rms;
Remote.remotes[msg.uid].setParams(msg);
}
iptool.py
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author:Cyan
"""
Try copying the cluttered IP range contents below to the file and use:
python3 ipParse.py -f filename --smart
192.168.1.0 192.168.2.1/24,192.168.3.4-7,192.168.5.1-.192.168.5.34、192.176.34.6\26、192.187.34.2-67,192.111.111.111,192.168.5.1 - 192.168.5.34 192.168.5.1. -- 0192.168.5.34,192.168.5.1--192.168.5.34、1.2.4.5、192.168.5.5-9
192.168.5.1~192.168.5.34,192.168.5. 1 ~ 192.168.05.0 123.3.3.3. 192.168.5.1~56 192.168.7.1
"""
import requests
from gevent import monkey; monkey.patch_socket()
from gevent.pool import Pool
import gevent
import re
import argparse
import ipaddress
import json
import dns.resolver
import logging
import urllib.parse
import socket
import sys
import os
import concurrent.futures
import tldextract
requests.packages.urllib3.disable_warnings()
REG_CD = re.compile(
r'(?P<cd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_SUBNET = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\/([0-9]|[1-2][0-9]|3[0-2])$')
REG_IP = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))$')
REG_IPRANGE = re.compile(
r'(?P<bd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){2})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P=bd)(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_Domain = re.compile(r'^([A-Za-z0-9]\.|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9]\.){1,3}[A-Za-z]{2,6}$')
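# Illustrative examples (not from the original) of the notations each pattern above accepts:
#   REG_IP      -> 192.168.1.10                single address
#   REG_SUBNET  -> 192.168.1.0/24              CIDR block
#   REG_CD      -> 192.168.2.1-243             range over the last octet only
#   REG_IPRANGE -> 192.168.1.1-192.168.2.128   full start-end range (first two octets must match)
#   REG_Domain  -> www.example.com             bare domain name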
def replSpace(rep):
return rep.group().replace(' ', '')
def replPoint(rep):
return rep.group().strip('.')
def replZero(rep):
return rep.group().lstrip('0')
# IPLIST = []
# Save with deduplication
# def save(ip):
# if ip not in IPLIST:
# IPLIST.append(ip)
# Handle the 192.168.1.1-192.168.2.128 form
def ipRange(item):
r=[]
res = REG_IPRANGE.match(item)
bd = res.group('bd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
d1 = int(res.group('d1'))
d2 = int(res.group('d2'))
if c1 == c2:
if d1 < d2:
for i in range(d1, d2 + 1):
r.append(bd + str(c1) + '.' + str(i))
else:
print(f'\033[1;31mPlease check this IP: {item}\033[0m')
elif c1 < c2:
for c in range(c1, c2 + 1):
for d in range(d1, 255):
if c == c2 and d > d2:
break
else:
r.append(bd + str(c) + '.' + str(d))
d1 = 0
else:
print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
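# Illustrative (not in the original): ipRange('192.168.1.250-192.168.2.3') returns
# ['192.168.1.250', ..., '192.168.1.254', '192.168.2.0', ..., '192.168.2.3'].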
# Handle the 192.168.2.1-243 form
def dealCd(item):
r=[]
res = REG_CD.match(item)
cd = res.group('cd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
if c1 < c2:
for i in range(c1, c2 + 1):
r.append(cd + str(i))
else:
print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
# Handle the 192.168.1.0/24 form
def dealSubnet(item):
r=[]
if int(re.match(r'.*/(\d+)',item).group(1))<=16:
print(f'too big range:{item}')
exit()
net = ipaddress.ip_network(item, strict=False)
for ip in net.hosts():
r.append(str(ip))
return r
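# Illustrative (not in the original): dealSubnet('192.168.1.0/30') returns
# ['192.168.1.1', '192.168.1.2']; prefixes of /16 and wider are rejected above.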
# Dispatch each IP notation to the matching handler
def ipParse(iplist):
IPLIST=[]
for item in iplist:
# print(item)
if REG_IPRANGE.match(item): # 192.168.1.1-192.168.2.128
IPLIST.extend(ipRange(item))
elif REG_CD.match(item): # 192.168.2.1-243
IPLIST.extend(dealCd(item))
elif REG_SUBNET.match(item): # 192.168.2.1/24
IPLIST.extend(dealSubnet(item))
elif REG_IP.match(item):
IPLIST.append(item)
else:
logging.info(f'\033[1;31mPlease check this IP: {item}\033[0m')
r = list(set(IPLIST))
r.sort(key=IPLIST.index)
return r
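# Illustrative (not in the original): ipParse(['10.0.0.1/30', '10.0.0.9', '10.0.0.5-7'])
# returns ['10.0.0.1', '10.0.0.2', '10.0.0.9', '10.0.0.5', '10.0.0.6', '10.0.0.7'].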
# Parse a free-form IP range file
def format(ipfile):
with open(ipfile, encoding="utf-8") as f:
content = f.read()
logging.info("-" * 80)
# 192.168.1.1 -- 254  normalize sloppy separators (e.g. ~~ ~ -- -) to '-', and '\' to '/'
s1 = re.sub(r'\s*[-~]+\s*', '-', content).replace('\\','/').replace('"','').replace("'",'')
# 123. 34 .123 . 123  strip stray spaces inside addresses -- comment this line out if it causes problems
s1 = re.sub(r'(\d+\s*\.\s*){3}\d+', replSpace, s1)
# .123.123.123.123  strip mistakenly written leading/trailing dots -- comment this line out if it causes problems
s1 = re.sub(r'\.?(\d+\.*){3}\d+\.?', replPoint, s1)
s1 = re.sub(r'\d{2,}', replZero, s1) # strip leading zeros from multi-digit octets, e.g. the 02 in 123.0.02.1
s1 = re.split(r'[\n\s,,、;;]+', s1) # split on these separators (ASCII and fullwidth) into a list; deduplicated next
s1 = list({x for x in s1 if x !=''})
s1.sort()
logging.info(s1)
logging.info("-" * 80)
for x in ipParse(s1):
print(x)
def dns_record(domain):
green = "\x1b[1;32m"
cyan = "\x1b[1;36m"
clear = "\x1b[0m"
record_type = ["A","AAAA","CNAME","NS","MX","TXT","SOA","PTR","SPF","SRV","AXFR","IXFR",
"MAILB","URI","HIP","A6","AFSDB","APL","CAA","CDNSKEY","CDS",
"CSYNC","DHCID","DLV","DNAME","DNSKEY","DS","EUI48","EUI64",
"MB","MD","MF","MG","MINFO","MR","NAPTR","NINFO","NSAP","NSEC",
"NSEC3","NSEC3PARAM","NULL","NXT","OPENPGPKEY","OPT","PX","RP",
"RRSIG","RT","SIG","SSHFP","TA","TKEY","TLSA","TSIG",
"GPOS","HINFO","IPSECKEY","ISDN","KEY","KX","LOC","MAILA",
"UNSPEC","WKS","X25","CERT","ATMA","DOA","EID","GID","L32",
"L64","LP","NB","NBSTAT","NID","NIMLOC","NSAP-PTR","RKEY",
"SINK","SMIMEA","SVCB","TALINK","UID","UINFO","ZONEMD","HTTPS"]
for rt in record_type:
try:
r = dns.resolver.resolve(domain, rt)
except Exception as e:
print(rt + "\t" + str(e))
# print(e)
else:
# print(rt)
for v in r:
print(
green + rt + clear + "\t" +
cyan + str(v) + clear)
def ip_location(ip):
# try:
# requests.get(f"https://www.sogou.com/reventondc/external?key={ip}&type=2&charset=utf8&objid=20099801&userarea=d123&uuid=6a3e3dd2-d0cb-440c-ac45-a62125dee188&p_ip=180.101.49.12&callback=sogouCallback1620961932681")
# except Exception as e:
# pass
# else:
# try:
# requests.get("https://open.onebox.so.com/dataApi?callback=jQuery18301409029392462775_1620962038263&type=ip&src=onebox&tpl=0&num=1&query=ip&ip=180.101.49.12&url=ip&_=1620962046570")
# except Exception as e:
# pass
# try:
# requests.get("https://apiquark.sm.cn/rest?method=sc.number_ip_new&request_sc=shortcut_searcher::number_ip_new&callback=sc_ip_search_callback&q=103.235.46.39&callback=jsonp2")
# except Exception as e:
# pass
# try:
# requests.get("https://so.toutiao.com/2/wap/search/extra/ip_query?ip=103.235.46.39")
# except Exception:
# pass
ip=ip.strip()
# print(ip)
try:
resp=requests.get(f"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query=f{ip}&co=&resource_id=5809&t=1600743020566&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery110208008102506768224_1600742984815&_=1600742984816")
# print(resp.text)
except Exception as e:
# print(e)
return ip, "Error: "+str(e)
j = json.loads(resp.text[42:-1])
# print(j)
if len(j['Result'])!=0:
# print(j['Result'][0])
return ip, j['Result'][0]['DisplayData']['resultData']['tplData']['location']
else:
# print(f"INFO: {ip} {j}")
# print(j['Result'])
return ip, j['Result']
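# Illustrative (not in the original): ip_location('180.101.49.12') returns a tuple
# (ip, location_string) on success, (ip, []) when nothing is found, or (ip, "Error: ...") on request failure.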
def ip_reverse(ip):
# https://www.threatcrowd.org/searchApi/v2/ip/report/?ip=
try:
resp=requests.get(f"https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}&__cf_chl_jschl_tk__=b23e1ebddba7a8afcec8002ebe8161982a307678-1600841853-0-AdBviI4eBSvsCtV19ogQiOgQh8BZDLUSjLLWlPxcUmToHHMVBUzRMOttXDt0rU_oBQ9sjEco0JVg1HpkyolfayL92SM2O7_7QPM67RLnKw6bB2HLrDSbAe1isBru5CZQMW37d1m5MI-3maLEyCwpAx5M5n3gjSTPATv6XUK6GYvSdIIflKHKr8NI1wjWqe6YHdsdGshphzA5RP9IINVQ_q3mRfxz7YbZiW49E3sduJLtQjiFB1IaGapMdW_HMt_qbw_jJo4S7j_w-ZnEVKTCBpwR5LVACjy3p2rv_lTL7Uw1zW1J84fJ--sTRfKa1iZlN1-eENeG293SoP0IIGM0l-c",
timeout=10,
cookies={"__cfduid":"d1f527bf2b91e30ae3e5edc6392e873091600248379","cf_clearance":"1d01f377dd9b8c5c7b76a488c7b4adbd3da6055a-1600841859-0-1zd74c2a3az56d45067z127237b9-150"},
headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"},
verify=False,
)
except Exception as e:
print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return e
# print(resp.text)
try:
j=json.loads(resp.text)
except Exception as e:
print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return "Cloudflare DDos Detect!"
r=""
if j['response_code']!='0':
if len(j['resolutions'])>100:
j['resolutions']=j['resolutions'][:99]
for record in j['resolutions']:
r+=f"{record['last_resolved']}\t{record['domain']}\n"
return r[:-1]
else:
# print("Not Found!")
return "No reverse information found!"
def interactive_ip_reverse():
"""
Interactive loop for reverse IP lookups.
"""
while True:
ip=input("Input IP: ").strip()
if not re.match(r"^(\d{1,3}\.){3}\d{1,3}$",ip):
print("\"%s\" is not a valid IP!"%ip)
print("-"*100)
continue
jobs=[
# gevent.spawn(ip_location, ip),
gevent.spawn(ip_reverse, ip),
]
gevent.joinall(jobs)
for job in jobs:
print(job.value)
print("-"*100)
def extract_host(url):
url=url.strip()
if (not url.startswith("http") and not url.startswith("//")):
url="https://"+url
# print(urllib.parse.urlparse(url)[1])
return urllib.parse.urlparse(url)[1]
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = ['8.8.8.8']
def getIP(url):
host=extract_host(url)
try:
google_record=[rdata.address for rdata in my_resolver.resolve(host, 'A')]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
google_record=[]
try:
socket_record=socket.gethostbyname_ex(host)[2]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
socket_record=[]
# print(google_record,socket_record)
socket_record.extend([x for x in google_record if x not in socket_record])
# print(google_record,socket_record)
if len(socket_record) == 0:
print(f"\033[1;31m ERROR: {host} resolve error\033[0m")
return host,socket_record
def sync_getIP(url_list):
r=[]
p=Pool(THREADS)
threads=[p.spawn(getIP, i) for i in url_list]
gevent.joinall(threads)
for item in threads:
r.append(item.value)
return r
def getTLD(file):
tld_list=set()
with open(file,"r") as f:
for x in f:
if x.strip()!="":
tld = tldextract.extract(x).registered_domain
if tld!="":
tld_list.add(tld)
for x in tld_list:
print(x)
def archive(domain_list):
sigleIP={}
info_pool=[]
for host,ip_list in sync_getIP(domain_list):
info_pool.append((host,ip_list))
if len(ip_list)==1:
sigleIP[ip_list[0]]=[]
# for ip in sigleIP:
# print("### "+ip)
# for info in info_pool:
# if ip in info[2]:
# print(info[1])
for info in info_pool:
for ip in info[1]:
if ip in sigleIP.keys():
sigleIP[ip].append(info[0])
break
else:
print(info[0],info[1])
# print(sigleIP)
for i,v in sigleIP.items():
print(f"### {i}\t{ip_location(i)[1]}")
for t in v:
print(t)
print("### Nmap")
print(f"sudo nmap -Pn -sS -sV -T3 -p1-65535 --open {' '.join([ip for ip in sigleIP.keys()])}")
def sync_ip_location(ip_list):
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for ip, located in executor.map(ip_location, ip_list):
print(ip, located)
THREADS=None
logging.basicConfig(format='%(message)s',
level=logging.INFO)
def main():
Usage = """
single
# ip # show local ip
# ip 8.8.8.8 # show location && provider
# ip www.baidu.com # show ip and location
multi
# ip -c 8.8.8.8/24 [--location] # show cidr
# ip -f iplist.txt [--format] [--archive] [--tld] [--location] # list all ip
# ip -dns www.baidu.com # check dns
# ip --interactive # show domain or ip location
# ip --history 8.8.8.8 # show history domain TODO
"""
argvlen = len(sys.argv)
if argvlen == 1:
os.system("ifconfig -l | xargs -n1 ipconfig getifaddr")
return
if argvlen == 2:
if REG_IP.match(sys.argv[1]):
print("\t".join(ip_location(sys.argv[1])))
elif REG_Domain.match(sys.argv[1]):
host, ip_list = getIP(sys.argv[1])
print(host)
for ip in ip_list:
print("\t".join(ip_location(ip)))
else:
print("please provide a valid domain or IP")
return
parser = argparse.ArgumentParser()
# ip_parser=parser.add_argument_group("For IP list")
# # parser.description = 'Parse IP range like 192.168.2.3/26 10.0.4.1-10.0.4.9 10.0.0.1-254'
group = parser.add_mutually_exclusive_group()
# domain_parser=parser.add_argument_group("For domain list")
# reverse_parser=parser.add_argument_group("Reverse IP")
group.add_argument("-f", '--file', help="The file containing a list of IPs or domains")
group.add_argument("-c", '--cidr', help="Command line read a domains,IP or CIDR like 192.168.2.3/26,10.0.0.1-254,10.0.4.1-10.0.4.9")
group.add_argument("-dns", '--dns', help="Show dns record of domain")
parser.add_argument('--location', action="store_true", help="The location of IP")
# parser.add_argument('-t', "--threads", type=int, default=20, help="Number of threads(default 20)")
parser.add_argument('--format', action="store_true", help="Automatic analysis of messy file containing IPs")
parser.add_argument('--tld', action="store_true", help="Show TLD of domain")
# domain_parser.add_argument('--ip', action="store_true", help="show IP of domain")
# reverse_parser.add_argument('--interactive', action="store_true", help="open an interactive to get domain history of IP")
# domain_parser.add_argument('--archive', action="store_true", help="Archive IP and domain")
args = parser.parse_args()
if args.cidr:
ip_list = ipParse(args.cidr.strip(',').split(','))
if args.location:
sync_ip_location(ip_list)
else:
print("\n".join(ip_list))
logging.info(f'\033[0;36m{len(ip_list)} IPs in total\033[0m')
return
if args.file:
if args.format:
format(args.file)
return
if args.tld:
getTLD(args.file)
return
if args.location:
with open(args.file, encoding="utf-8") as f:
ip_list = f.readlines()
# print(ip_list)
sync_ip_location(ip_list)
if args.dns:
dns_record(args.dns)
# if args.interactive:
# interactive_ip_reverse()
# if not args.file and not args.cidr:
# print("The argument requires the -f or -c")
# exit(1)
# if args.archive and not args.ip:
# print("The --archive argument requires the --ip")
# exit(1)
# if args.smart and not args.file:
# print("The --smart argument requires the -f or --file")
# exit(1)
# global THREADS
# THREADS=args.threads
# if args.ip:
# if args.file:
# if args.archive:
# # python3 iptool.py -f domain_list.txt --ip --archive
# with open(args.file, encoding="utf-8") as f:
# archive(f.readlines())
# else:
# # python3 iptool.py -f domain_list.txt --ip
# with open(args.file, encoding="utf-8") as f:
# for x,y in sync_getIP(f.readlines()):
# print(x,y)
# else:
# # python3 iptool.py -c www.baidu.com,www.qq.com --ip
# url_list=args.cidr.strip(',').split(',')
# for u in url_list:
# host,ip_list=getIP(u)
# print(host)
# for ip in ip_list:
# print(ip,ip_location(ip))
# elif args.file:
# if args.smart:
# # python3 iptool.py -f ip_or_CIDR_messy_list.txt
# smart(args.file)
# else:
# with open(args.file, encoding="utf-8") as f:
# ip_list=[i.strip() for i in f if i.strip() !='']
# # ip.sort()
# if args.location:
# # python3 iptool.py -f ip_or_CIDR_list.txt --location
# sync_ip_location(ipParse(ip_list)) # 异步处理
# else:
# for x in ipParse(ip_list):
# # python3 iptool.py -f ip_or_CIDR_list.txt
# print(x)
# elif args.cidr:
# ip_list=ipParse(args.cidr.strip(',').split(','))
# # python3 iptool.py -c 192.168.0.1/24 --location
# if args.location:
# sync_ip_location(ip_list) # 异步处理
# else:
# for x in ip_list:
# # python3 iptool.py -c 192.168.0.1/24
# print(x)
# else:
# print('Use -h to show help') | main() |
if __name__ == '__main__': | random_line_split |
iptool.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author:Cyan
"""
Try copying the cluttered IP range contents below to the file and use:
python3 ipParse.py -f filename --smart
192.168.1.0 192.168.2.1/24,192.168.3.4-7,192.168.5.1-.192.168.5.34、192.176.34.6\26、192.187.34.2-67,192.111.111.111,192.168.5.1 - 192.168.5.34 192.168.5.1. -- 0192.168.5.34,192.168.5.1--192.168.5.34、1.2.4.5、192.168.5.5-9
192.168.5.1~192.168.5.34,192.168.5. 1 ~ 192.168.05.0 123.3.3.3. 192.168.5.1~56 192.168.7.1
"""
import requests
from gevent import monkey; monkey.patch_socket()
from gevent.pool import Pool
import gevent
import re
import argparse
import ipaddress
import json
import dns.resolver
import logging
import urllib
import socket
import sys
import os
import concurrent.futures
import tldextract
requests.packages.urllib3.disable_warnings()
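# Input formats recognised by the regexes below:
#   REG_CD       192.168.2.1-243             range in the last octet
#   REG_SUBNET   192.168.2.0/24              CIDR notation
#   REG_IP       192.168.2.1                 single IPv4 address
#   REG_IPRANGE  192.168.1.1-192.168.2.128   full-address range (first two octets must match)
#   REG_Domain   www.example.com             hostname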
REG_CD = re.compile(
r'(?P<cd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_SUBNET = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\/([0-9]|[1-2][0-9]|3[0-2])$')
REG_IP = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))$')
REG_IPRANGE = re.compile(
r'(?P<bd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){2})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P=bd)(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_Domain = re.compile(r'^([A-Za-z0-9]\.|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9]\.){1,3}[A-Za-z]{2,6}$')
def replSpace(rep):
return rep.group().replace(' ', '')
def replPoint(rep):
return rep.group().strip('.')
def replZero(rep):
return rep.group().lstrip('0')
# IPLIST = []
# save and deduplicate
# def save(ip):
# if ip not in IPLIST:
# IPLIST.append(ip)
# Handle the 192.168.1.1-192.168.2.128 form
def ipRange(item):
r=[]
res = REG_IPRANGE.match(item)
bd = res.group('bd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
d1 = int(res.group('d1'))
d2 = int(res.group('d2'))
if c1 == c2:
if d1 < d2:
for i in range(d1, d2 + 1):
r.append(bd + str(c1) + '.' + str(i))
else:
			print(f'\033[1;31mPlease check this IP: {item}\033[0m')
elif c1 < c2:
for c in range(c1, c2 + 1):
for d in range(d1, 255):
if c == c2 and d > d2:
break
else:
r.append(bd + str(c) + '.' + str(d))
d1 = 0
else:
		print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
# Handle the 192.168.2.1-243 form
def dealCd(item):
r=[]
res = REG_CD.match(item)
cd = res.group('cd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
if c1 < c2:
for i in range(c1, c2 + 1):
r.append(cd + str(i))
else:
		print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
# Handle the 192.168.1.0/24 form
def dealSubnet(item):
r=[]
if int(re.match(r'.*/(\d+)',item).group(1))<=16:
print(f'too big range:{item}')
exit()
net = ipaddress.ip_network(item, strict=False)
for ip in net.hosts():
r.append(str(ip))
return r
# Dispatch each input form to the matching handler
def ipParse(iplist):
IPLIST=[]
for item in iplist:
# print(item)
if REG_IPRANGE.match(item): # 192.168.1.1-192.168.2.128
IPLIST.extend(ipRange(item))
elif REG_CD.match(item): # 192.168.2.1-243
IPLIST.extend(dealCd(item))
elif REG_SUBNET.match(item): # 192.168.2.1/24
IPLIST.extend(dealSubnet(item))
elif REG_IP.match(item):
IPLIST.append(item)
else:
			logging.info(f'\033[1;31mPlease check this IP: {item}\033[0m')
r = list(set(IPLIST))
r.sort(key=IPLIST.index)
return r
# Normalise a messy, unformatted file of IPs/ranges and print the expanded result
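# e.g. a line like "192.168.1.1 -- 254、10.0.0.0/24" is normalised to
# ['10.0.0.0/24', '192.168.1.1-254'] and then expanded by ipParse()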
def format(ipfile):
with open(ipfile, encoding="utf-8") as f:
content = f.read()
logging.info("-" * 80)
	# 192.168.1.1 -- 254  replace nonstandard separators (e.g. ~~ ~ -- -) with a single -, and \ with /
	s1 = re.sub(r'\s*[-~]+\s*', '-', content).replace('\\','/').replace('"','').replace("'",'')
	# 123. 34 .123 . 123  remove stray spaces inside an address -- comment this line out if it causes errors
	s1 = re.sub(r'(\d+\s*\.\s*){3}\d+', replSpace, s1)
	# .123.123.123.123  strip dots mistakenly written at either end -- comment this line out if it causes errors
	s1 = re.sub(r'\.?(\d+\.*){3}\d+\.?', replPoint, s1)
	s1 = re.sub(r'\d{2,}', replZero, s1) # strip leading zeros from multi-digit octets, e.g. the 02 in 123.0.02.1
	s1 = re.split(r'[\n\s,,、;;]+', s1) # split on these separators into a list and deduplicate
s1 = list({x for x in s1 if x !=''})
s1.sort()
logging.info(s1)
logging.info("-" * 80)
for x in ipParse(s1):
print(x)
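# Query the domain for every DNS record type listed below, printing each answer in
# colour and the resolver error message for types that return nothing.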
def dns_record(domain):
green = "\x1b[1;32m"
cyan = "\x1b[1;36m"
clear = "\x1b[0m"
record_type = ["A","AAAA","CNAME","NS","MX","TXT","SOA","PTR","SPF","SRV","AXFR","IXFR",
"MAILB","URI","HIP","A6","AFSDB","APL","CAA","CDNSKEY","CDS",
"CSYNC","DHCID","DLV","DNAME","DNSKEY","DS","EUI48","EUI64",
"MB","MD","MF","MG","MINFO","MR","NAPTR","NINFO","NSAP","NSEC",
"NSEC3","NSEC3PARAM","NULL","NXT","OPENPGPKEY","OPT","PX","RP",
"RRSIG","RT","SIG","SSHFP","TA","TKEY","TLSA","TSIG",
"GPOS","HINFO","IPSECKEY","ISDN","KEY","KX","LOC","MAILA",
"UNSPEC","WKS","X25","CERT","ATMA","DOA","EID","GID","L32",
"L64","LP","NB","NBSTAT","NID","NIMLOC","NSAP-PTR","RKEY",
"SINK","SMIMEA","SVCB","TALINK","UID","UINFO","ZONEMD","HTTPS"]
for rt in record_type:
try:
r = dns.resolver.resolve(domain, rt)
except Exception as e:
print(rt + "\t" + str(e))
# print(e)
else:
# print(rt)
for v in r:
print(
green + rt + clear + "\t" +
cyan + str(v) + clear)
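# Geolocation/ISP lookup via Baidu's opendata API.
# Returns (ip, location_string); on failure returns (ip, "Error: ...") or (ip, []).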
def ip_location(ip):
# try:
# requests.get(f"https://www.sogou.com/reventondc/external?key={ip}&type=2&charset=utf8&objid=20099801&userarea=d123&uuid=6a3e3dd2-d0cb-440c-ac45-a62125dee188&p_ip=180.101.49.12&callback=sogouCallback1620961932681")
# except Exception as e:
# pass
# else:
# try:
# requests.get("https://open.onebox.so.com/dataApi?callback=jQuery18301409029392462775_1620962038263&type=ip&src=onebox&tpl=0&num=1&query=ip&ip=180.101.49.12&url=ip&_=1620962046570")
# except Exception as e:
# pass
# try:
# requests.get("https://apiquark.sm.cn/rest?method=sc.number_ip_new&request_sc=shortcut_searcher::number_ip_new&callback=sc_ip_search_callback&q=103.235.46.39&callback=jsonp2")
# except Exception as e:
# pass
# try:
# requests.get("https://so.toutiao.com/2/wap/search/extra/ip_query?ip=103.235.46.39")
# except Exception:
# pass
ip=ip.strip()
# print(ip)
try:
		resp=requests.get(f"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query={ip}&co=&resource_id=5809&t=1600743020566&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery110208008102506768224_1600742984815&_=1600742984816")
# print(resp.text)
except Exception as e:
# print(e)
return ip, "Error: "+str(e)
j = json.loads(resp.text[42:-1])
# print(j)
if len(j['Result'])!=0:
# print(j['Result'][0])
return ip, j['Result'][0]['DisplayData']['resultData']['tplData']['location']
else:
# print(f"INFO: {ip} {j}")
# print(j['Result'])
return ip, j['Result']
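# Reverse IP lookup via the ThreatCrowd API: returns up to ~100 lines of
# "last_resolved<TAB>domain" history, or an explanatory message when nothing is
# found or the request is blocked.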
def ip_reverse(ip):
# https://www.threatcrowd.org/searchApi/v2/ip/report/?ip=
try:
resp=requests.get(f"https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}&__cf_chl_jschl_tk__=b23e1ebddba7a8afcec8002ebe8161982a307678-1600841853-0-AdBviI4eBSvsCtV19ogQiOgQh8BZDLUSjLLWlPxcUmToHHMVBUzRMOttXDt0rU_oBQ9sjEco0JVg1HpkyolfayL92SM2O7_7QPM67RLnKw6bB2HLrDSbAe1isBru5CZQMW37d1m5MI-3maLEyCwpAx5M5n3gjSTPATv6XUK6GYvSdIIflKHKr8NI1wjWqe6YHdsdGshphzA5RP9IINVQ_q3mRfxz7YbZiW49E3sduJLtQjiFB1IaGapMdW_HMt_qbw_jJo4S7j_w-ZnEVKTCBpwR5LVACjy3p2rv_lTL7Uw1zW1J84fJ--sTRfKa1iZlN1-eENeG293SoP0IIGM0l-c",
timeout=10,
cookies={"__cfduid":"d1f527bf2b91e30ae3e5edc6392e873091600248379","cf_clearance":"1d01f377dd9b8c5c7b76a488c7b4adbd3da6055a-1600841859-0-1zd74c2a3az56d45067z127237b9-150"},
headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"},
verify=False,
)
except Exception as e:
		print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return e
# print(resp.text)
try:
j=json.loads(resp.text)
except Exception as e:
		print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return "Cloudflare DDos Detect!"
r=""
if j['response_code']!='0':
if len(j['resolutions'])>100:
j['resolutions']=j['resolutions'][:99]
for record in j['resolutions']:
r+=f"{record['last_resolved']}\t{record['domain']}\n"
return r[:-1]
else:
# print("Not Found!")
		return "No reverse information found!"
def interactive_ip_reverse():
"""
	Interactive loop for reverse IP lookups.
"""
while True:
ip=input("Input IP: ").strip()
if not re.match(r"^(\d{1,3}\.){3}\d{1,3}$",ip):
print("\"%s\" is not a valid IP!"%ip)
print("-"*100)
continue
jobs=[
# gevent.spawn(ip_location, ip),
gevent.spawn(ip_reverse, ip),
]
gevent.joinall(jobs)
for job in jobs:
print(job.value)
print("-"*100)
def extract_host(url):
url=url.strip()
if (not url.startswith("http") and not url.startswith("//")):
url="https://"+url
# print(urllib.parse.urlparse(url)[1])
return urllib.parse.urlparse(url)[1]
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = ['8.8.8.8']
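# Resolve a URL or hostname to IPv4 addresses using both Google DNS (8.8.8.8) and the
# system resolver, returning (host, merged list of unique addresses).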
def getIP(url):
host=extract_host(url)
try:
google_record=[rdata.address for rdata in my_resolver.resolve(host, 'A')]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
google_record=[]
try:
socket_record=socket.gethostbyname_ex(host)[2]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
socket_record=[]
# print(google_record,socket_record)
socket_record.extend([x for x in google_record if x not in socket_record])
# print(google_record,socket_record)
if len(socket_record) == 0:
print(f"\033[1;31m ERROR: {host} resolve error\033[0m")
return host,socket_record
def sync_getIP(url_list):
r=[]
p=Pool(THREADS)
threads=[p.spawn(getIP, i) for i in url_list]
gevent.joinall(threads)
for item in threads:
r.append(item.value)
return r
def getTLD(file):
tld_list=set()
with open(file,"r") as f:
for x in f:
if x.strip()!="":
tld = tldextract.extract(x).registered_domain
if tld!="":
tld_list.add(tld)
for x in tld_list:
print(x)
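# Group hosts under IPs that are the sole resolution of at least one domain, print each
# group with the IP's location, then print an nmap command covering those IPs.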
def archive(domain_list):
sigleIP={}
info_pool=[]
for host,ip_list in sync_getIP(domain_list):
info_pool.append((host,ip_list))
if len(ip_list)==1:
sigleIP[ip_list[0]]=[]
# for ip in sigleIP:
# print("### "+ip)
# for info in info_pool:
# if ip in info[2]:
# print(info[1])
for info in info_pool:
for ip in info[1]:
| break
else:
print(info[0],info[1])
# print(sigleIP)
for i,v in sigleIP.items():
		print(f"### {i}\t{ip_location(i)[1]}")
for t in v:
print(t)
print("### Nmap")
print(f"sudo nmap -Pn -sS -sV -T3 -p1-65535 --open {' '.join([ip for ip in sigleIP.keys()])}")
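# Resolve the location of each IP in the list concurrently (10 worker threads) and print it.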
def sync_ip_location(ip_list):
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for ip, located in executor.map(ip_location, ip_list):
print(ip, located)
THREADS=None
logging.basicConfig(format='%(message)s',
level=logging.INFO)
def main():
	Usage = """
single
# ip # show local ip
# ip 8.8.8.8 # show location && provider
# ip www.baidu.com # show ip and location
multi
# ip -c 8.8.8.8/24 [--location] # show cidr
# ip -f iplist.txt [--format] [--archive] [--tld] [--location] # list all ip
# ip -dns www.baidu.com # check dns
# ip --interactive # show domain or ip location
# ip --history 8.8.8.8 # show history domain TODO
"""
argvlen = len(sys.argv)
if argvlen == 1:
os.system("ifconfig -l | xargs -n1 ipconfig getifaddr")
return
if argvlen == 2:
if REG_IP.match(sys.argv[1]):
print("\t".join(ip_location(sys.argv[1])))
elif REG_Domain.match(sys.argv[1]):
host, ip_list = getIP(sys.argv[1])
print(host)
for ip in ip_list:
print("\t".join(ip_location(ip)))
else:
			print("please provide a valid domain or IP")
return
parser = argparse.ArgumentParser()
# ip_parser=parser.add_argument_group("For IP list")
# # parser.description = 'Parse IP range like 192.168.2.3/26 10.0.4.1-10.0.4.9 10.0.0.1-254'
group = parser.add_mutually_exclusive_group()
# domain_parser=parser.add_argument_group("For domain list")
# reverse_parser=parser.add_argument_group("Reverse IP")
group.add_argument("-f", '--file', help="The file containing a list of IPs or domains")
group.add_argument("-c", '--cidr', help="Command line read a domains,IP or CIDR like 192.168.2.3/26,10.0.0.1-254,10.0.4.1-10.0.4.9")
group.add_argument("-dns", '--dns', help="Show dns record of domain")
parser.add_argument('--location', action="store_true", help="The location of IP")
# parser.add_argument('-t', "--threads", type=int, default=20, help="Number of threads(default 20)")
parser.add_argument('--format', action="store_true", help="Automatic analysis of messy file containing IPs")
parser.add_argument('--tld', action="store_true", help="Show TLD of domain")
# domain_parser.add_argument('--ip', action="store_true", help="show IP of domain")
# reverse_parser.add_argument('--interactive', action="store_true", help="open an interactive to get domain history of IP")
# domain_parser.add_argument('--archive', action="store_true", help="Archive IP and domain")
args = parser.parse_args()
if args.cidr:
ip_list = ipParse(args.cidr.strip(',').split(','))
if args.location:
sync_ip_location(ip_list)
else:
print("\n".join(ip_list))
			logging.info(f'\033[0;36m{len(ip_list)} IPs in total\033[0m')
return
if args.file:
if args.format:
format(args.file)
return
if args.tld:
getTLD(args.file)
return
if args.location:
with open(args.file, encoding="utf-8") as f:
ip_list = f.readlines()
# print(ip_list)
sync_ip_location(ip_list)
if args.dns:
dns_record(args.dns)
# if args.interactive:
# interactive_ip_reverse()
# if not args.file and not args.cidr:
# print("The argument requires the -f or -c")
# exit(1)
# if args.archive and not args.ip:
# print("The --archive argument requires the --ip")
# exit(1)
# if args.smart and not args.file:
# print("The --smart argument requires the -f or --file")
# exit(1)
# global THREADS
# THREADS=args.threads
# if args.ip:
# if args.file:
# if args.archive:
# # python3 iptool.py -f domain_list.txt --ip --archive
# with open(args.file, encoding="utf-8") as f:
# archive(f.readlines())
# else:
# # python3 iptool.py -f domain_list.txt --ip
# with open(args.file, encoding="utf-8") as f:
# for x,y in sync_getIP(f.readlines()):
# print(x,y)
# else:
# # python3 iptool.py -c www.baidu.com,www.qq.com --ip
# url_list=args.cidr.strip(',').split(',')
# for u in url_list:
# host,ip_list=getIP(u)
# print(host)
# for ip in ip_list:
# print(ip,ip_location(ip))
# elif args.file:
# if args.smart:
# # python3 iptool.py -f ip_or_CIDR_messy_list.txt
# smart(args.file)
# else:
# with open(args.file, encoding="utf-8") as f:
# ip_list=[i.strip() for i in f if i.strip() !='']
# # ip.sort()
# if args.location:
# # python3 iptool.py -f ip_or_CIDR_list.txt --location
# sync_ip_location(ipParse(ip_list)) # 异步处理
# else:
# for x in ipParse(ip_list):
# # python3 iptool.py -f ip_or_CIDR_list.txt
# print(x)
# elif args.cidr:
# ip_list=ipParse(args.cidr.strip(',').split(','))
# # python3 iptool.py -c 192.168.0.1/24 --location
# if args.location:
# sync_ip_location(ip_list) # 异步处理
# else:
# for x in ip_list:
# # python3 iptool.py -c 192.168.0.1/24
# print(x)
# else:
# print('Use -h to show help')
if __name__ == '__main__':
main()
| if ip in sigleIP.keys():
sigleIP[ip].append(info[0])
| conditional_block |
iptool.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author:Cyan
"""
Try copying the cluttered IP range contents below to the file and use:
python3 ipParse.py -f filename --smart
192.168.1.0 192.168.2.1/24,192.168.3.4-7,192.168.5.1-.192.168.5.34、192.176.34.6\26、192.187.34.2-67,192.111.111.111,192.168.5.1 - 192.168.5.34 192.168.5.1. -- 0192.168.5.34,192.168.5.1--192.168.5.34、1.2.4.5、192.168.5.5-9
192.168.5.1~192.168.5.34,192.168.5. 1 ~ 192.168.05.0 123.3.3.3. 192.168.5.1~56 192.168.7.1
"""
import requests
from gevent import monkey; monkey.patch_socket()
from gevent.pool import Pool
import gevent
import re
import argparse
import ipaddress
import json
import dns.resolver
import logging
import urllib
import socket
import sys
import os
import concurrent.futures
import tldextract
requests.packages.urllib3.disable_warnings()
REG_CD = re.compile(
r'(?P<cd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_SUBNET = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\/([0-9]|[1-2][0-9]|3[0-2])$')
REG_IP = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))$')
REG_IPRANGE = re.compile(
r'(?P<bd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){2})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P=bd)(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_Domain = re.compile(r'^([A-Za-z0-9]\.|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9]\.){1,3}[A-Za-z]{2,6}$')
def replSpace(rep):
return rep.group().replace(' ', '')
def replPoint(rep):
return rep.group().strip('.')
def replZero(rep):
return rep.group().lstrip('0')
# IPLIST = []
# save and deduplicate
# def save(ip):
# if ip not in IPLIST:
# IPLIST.append(ip)
# Handle the 192.168.1.1-192.168.2.128 form
def ipRange(item):
r=[]
res = REG_IPRANGE.match(item)
bd = res.group('bd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
d1 = int(res.group('d1'))
d2 = int(res.group('d2'))
if c1 == c2:
if d1 < d2:
for i in range(d1, d2 + 1):
r.append(bd + str(c1) + '.' + str(i))
else:
			print(f'\033[1;31mPlease check this IP: {item}\033[0m')
elif c1 < c2:
for c in range(c1, c2 + 1):
for d in range(d1, 255):
if c == c2 and d > d2:
break
else:
r.append(bd + str(c) + '.' + str(d))
d1 = 0
else:
		print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
# Handle the 192.168.2.1-243 form
def dealCd(item):
r=[]
res = REG_CD.match(item)
cd = res.group('cd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
if c1 < c2:
for i in range(c1, c2 + 1):
r.append(cd + str(i))
else:
		print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
# Handle the 192.168.1.0/24 form
def dealSubnet(item):
r=[]
if int(re.match(r'.*/(\d+)',item).group(1))<=16:
print(f'too big range:{item}')
exit()
net = ipaddress.ip_network(item, strict=False)
for ip in net.hosts():
r.append(str(ip))
return r
# Dispatch each input form to the matching handler
def ipParse(iplist):
IPLIST=[]
for item in iplist:
# print(item)
if REG_IPRANGE.match(item): # 192.168.1.1-192.168.2.128
IPLIST.extend(ipRange(item))
elif REG_CD.match(item): # 192.168.2.1-243
IPLIST.extend(dealCd(item))
elif REG_SUBNET.match(item): # 192.168.2.1/24
IPLIST.extend(dealSubnet(item))
elif REG_IP.match(item):
IPLIST.append(item)
else:
			logging.info(f'\033[1;31mPlease check this IP: {item}\033[0m')
r = list(set(IPLIST))
r.sort(key=IPLIST.index)
return r
# Normalise a messy, unformatted file of IPs/ranges and print the expanded result
def format(ipfile):
with open(ipfile, encoding="utf-8") as f:
content = f.read()
logging.info("-" * 80)
	# 192.168.1.1 -- 254  replace nonstandard separators (e.g. ~~ ~ -- -) with a single -, and \ with /
	s1 = re.sub(r'\s*[-~]+\s*', '-', content).replace('\\','/').replace('"','').replace("'",'')
	# 123. 34 .123 . 123  remove stray spaces inside an address -- comment this line out if it causes errors
	s1 = re.sub(r'(\d+\s*\.\s*){3}\d+', replSpace, s1)
	# .123.123.123.123  strip dots mistakenly written at either end -- comment this line out if it causes errors
	s1 = re.sub(r'\.?(\d+\.*){3}\d+\.?', replPoint, s1)
	s1 = re.sub(r'\d{2,}', replZero, s1) # strip leading zeros from multi-digit octets, e.g. the 02 in 123.0.02.1
	s1 = re.split(r'[\n\s,,、;;]+', s1) # split on these separators into a list and deduplicate
s1 = list({x for x in s1 if x !=''})
s1.sort()
logging.info(s1)
logging.info("-" * 80)
for x in ipParse(s1):
print(x)
def dns_record(domain):
green = "\x1b[1;32m"
cyan = "\x1b[1;36m"
clear = "\x1b[0m"
record_type = ["A","AAAA","CNAME","NS","MX","TXT","SOA","PTR","SPF","SRV","AXFR","IXFR",
"MAILB","URI","HIP","A6","AFSDB","APL","CAA","CDNSKEY","CDS",
"CSYNC","DHCID","DLV","DNAME","DNSKEY","DS","EUI48","EUI64",
"MB","MD","MF","MG","MINFO","MR","NAPTR","NINFO","NSAP","NSEC",
"NSEC3","NSEC3PARAM","NULL","NXT","OPENPGPKEY","OPT","PX","RP",
"RRSIG","RT","SIG","SSHFP","TA","TKEY","TLSA","TSIG",
"GPOS","HINFO","IPSECKEY","ISDN","KEY","KX","LOC","MAILA",
"UNSPEC","WKS","X25","CERT","ATMA","DOA","EID","GID","L32",
"L64","LP","NB","NBSTAT","NID","NIMLOC","NSAP-PTR","RKEY",
"SINK","SMIMEA","SVCB","TALINK","UID","UINFO","ZONEMD","HTTPS"]
for rt in record_type:
try:
r = dns.resolver.resolve(domain, rt)
except Exception as e:
print(rt + "\t" + str(e))
# print(e)
else:
# print(rt)
for v in r:
print(
green + rt + clear + "\t" +
cyan + str(v) + clear)
def ip_location(ip):
# try:
# requests.get(f"https://www.sogou.com/reventondc/external?key={ip}&type=2&charset=utf8&objid=20099801&userarea=d123&uuid=6a3e3dd2-d0cb-440c-ac45-a62125dee188&p_ip=180.101.49.12&callback=sogouCallback1620961932681")
# except Exception as e:
# pass
# else:
# try:
# requests.get("https://open.onebox.so.com/dataApi?callback=jQuery18301409029392462775_1620962038263&type=ip&src=onebox&tpl=0&num=1&query=ip&ip=180.101.49.12&url=ip&_=1620962046570")
# except Exception as e:
# pass
# try:
# requests.get("https://apiquark.sm.cn/rest?method=sc.number_ip_new&request_sc=shortcut_searcher::number_ip_new&callback=sc_ip_search_callback&q=103.235.46.39&callback=jsonp2")
# except Exception as e:
# pass
# try:
# requests.get("https://so.toutiao.com/2/wap/search/extra/ip_query?ip=103.235.46.39")
# except Exception:
# pass
ip=ip.strip()
# print(ip)
try:
		resp=requests.get(f"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query={ip}&co=&resource_id=5809&t=1600743020566&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery110208008102506768224_1600742984815&_=1600742984816")
# print(resp.text)
except Exception as e:
# print(e)
return ip, "Error: "+str(e)
j = json.loads(resp.text[42:-1])
# print(j)
if len(j['Result'])!=0:
# print(j['Result'][0])
return ip, j['Result'][0]['DisplayData']['resultData']['tplData']['location']
else:
# print(f"INFO: {ip} {j}")
# print(j['Result'])
return ip, j['Result']
def ip_reverse(ip):
# https://www.threatcrowd.org/searchApi/v2/ip/report/?ip=
try:
resp=requests.get(f"https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}&__cf_chl_jschl_tk__=b23e1ebddba7a8afcec8002ebe8161982a307678-1600841853-0-AdBviI4eBSvsCtV19ogQiOgQh8BZDLUSjLLWlPxcUmToHHMVBUzRMOttXDt0rU_oBQ9sjEco0JVg1HpkyolfayL92SM2O7_7QPM67RLnKw6bB2HLrDSbAe1isBru5CZQMW37d1m5MI-3maLEyCwpAx5M5n3gjSTPATv6XUK6GYvSdIIflKHKr8NI1wjWqe6YHdsdGshphzA5RP9IINVQ_q3mRfxz7YbZiW49E3sduJLtQjiFB1IaGapMdW_HMt_qbw_jJo4S7j_w-ZnEVKTCBpwR5LVACjy3p2rv_lTL7Uw1zW1J84fJ--sTRfKa1iZlN1-eENeG293SoP0IIGM0l-c",
timeout=10,
cookies={"__cfduid":"d1f527bf2b91e30ae3e5edc6392e873091600248379","cf_clearance":"1d01f377dd9b8c5c7b76a488c7b4adbd3da6055a-1600841859-0-1zd74c2a3az56d45067z127237b9-150"},
headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"},
verify=False,
)
except Exception as e:
		print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return e
# print(resp.text)
try:
j=json.loads(resp.text)
except Exception as e:
		print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return "Cloudflare DDos Detect!"
r=""
if j['response_code']!='0':
if len(j['resolutions'])>100:
j['resolutions']=j['resolutions'][:99]
for record in j['resolutions']:
r+=f"{record['last_resolved']}\t{record['domain']}\n"
return r[:-1]
else:
# print("Not Found!")
		return "No reverse information found!"
def interactive_ip_reverse():
"""
	Interactive loop for reverse IP lookups.
"""
while True:
ip=input("Input IP: ").strip()
if not re.match(r"^(\d{1,3}\.){3}\d{1,3}$",ip):
print("\"%s\" is not a valid IP!"%ip)
print("-"*100)
continue
jobs=[
| pawn(ip_location, ip),
gevent.spawn(ip_reverse, ip),
]
gevent.joinall(jobs)
for job in jobs:
print(job.value)
print("-"*100)
def extract_host(url):
url=url.strip()
if (not url.startswith("http") and not url.startswith("//")):
url="https://"+url
# print(urllib.parse.urlparse(url)[1])
return urllib.parse.urlparse(url)[1]
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = ['8.8.8.8']
def getIP(url):
host=extract_host(url)
try:
google_record=[rdata.address for rdata in my_resolver.resolve(host, 'A')]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
google_record=[]
try:
socket_record=socket.gethostbyname_ex(host)[2]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
socket_record=[]
# print(google_record,socket_record)
socket_record.extend([x for x in google_record if x not in socket_record])
# print(google_record,socket_record)
if len(socket_record) == 0:
print(f"\033[1;31m ERROR: {host} resolve error\033[0m")
return host,socket_record
def sync_getIP(url_list):
r=[]
p=Pool(THREADS)
threads=[p.spawn(getIP, i) for i in url_list]
gevent.joinall(threads)
for item in threads:
r.append(item.value)
return r
def getTLD(file):
tld_list=set()
with open(file,"r") as f:
for x in f:
if x.strip()!="":
tld = tldextract.extract(x).registered_domain
if tld!="":
tld_list.add(tld)
for x in tld_list:
print(x)
def archive(domain_list):
sigleIP={}
info_pool=[]
for host,ip_list in sync_getIP(domain_list):
info_pool.append((host,ip_list))
if len(ip_list)==1:
sigleIP[ip_list[0]]=[]
# for ip in sigleIP:
# print("### "+ip)
# for info in info_pool:
# if ip in info[2]:
# print(info[1])
for info in info_pool:
for ip in info[1]:
if ip in sigleIP.keys():
sigleIP[ip].append(info[0])
break
else:
print(info[0],info[1])
# print(sigleIP)
for i,v in sigleIP.items():
		print(f"### {i}\t{ip_location(i)[1]}")
for t in v:
print(t)
print("### Nmap")
print(f"sudo nmap -Pn -sS -sV -T3 -p1-65535 --open {' '.join([ip for ip in sigleIP.keys()])}")
def sync_ip_location(ip_list):
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for ip, located in executor.map(ip_location, ip_list):
print(ip, located)
THREADS=None
logging.basicConfig(format='%(message)s',
level=logging.INFO)
def main():
	Usage = """
single
# ip # show local ip
# ip 8.8.8.8 # show location && provider
# ip www.baidu.com # show ip and location
multi
# ip -c 8.8.8.8/24 [--location] # show cidr
# ip -f iplist.txt [--format] [--archive] [--tld] [--location] # list all ip
# ip -dns www.baidu.com # check dns
# ip --interactive # show domain or ip location
# ip --history 8.8.8.8 # show history domain TODO
"""
argvlen = len(sys.argv)
if argvlen == 1:
os.system("ifconfig -l | xargs -n1 ipconfig getifaddr")
return
if argvlen == 2:
if REG_IP.match(sys.argv[1]):
print("\t".join(ip_location(sys.argv[1])))
elif REG_Domain.match(sys.argv[1]):
host, ip_list = getIP(sys.argv[1])
print(host)
for ip in ip_list:
print("\t".join(ip_location(ip)))
else:
			print("please provide a valid domain or IP")
return
parser = argparse.ArgumentParser()
# ip_parser=parser.add_argument_group("For IP list")
# # parser.description = 'Parse IP range like 192.168.2.3/26 10.0.4.1-10.0.4.9 10.0.0.1-254'
group = parser.add_mutually_exclusive_group()
# domain_parser=parser.add_argument_group("For domain list")
# reverse_parser=parser.add_argument_group("Reverse IP")
group.add_argument("-f", '--file', help="The file containing a list of IPs or domains")
group.add_argument("-c", '--cidr', help="Command line read a domains,IP or CIDR like 192.168.2.3/26,10.0.0.1-254,10.0.4.1-10.0.4.9")
group.add_argument("-dns", '--dns', help="Show dns record of domain")
parser.add_argument('--location', action="store_true", help="The location of IP")
# parser.add_argument('-t', "--threads", type=int, default=20, help="Number of threads(default 20)")
parser.add_argument('--format', action="store_true", help="Automatic analysis of messy file containing IPs")
parser.add_argument('--tld', action="store_true", help="Show TLD of domain")
# domain_parser.add_argument('--ip', action="store_true", help="show IP of domain")
# reverse_parser.add_argument('--interactive', action="store_true", help="open an interactive to get domain history of IP")
# domain_parser.add_argument('--archive', action="store_true", help="Archive IP and domain")
args = parser.parse_args()
if args.cidr:
ip_list = ipParse(args.cidr.strip(',').split(','))
if args.location:
sync_ip_location(ip_list)
else:
print("\n".join(ip_list))
			logging.info(f'\033[0;36m{len(ip_list)} IPs in total\033[0m')
return
if args.file:
if args.format:
format(args.file)
return
if args.tld:
getTLD(args.file)
return
if args.location:
with open(args.file, encoding="utf-8") as f:
ip_list = f.readlines()
# print(ip_list)
sync_ip_location(ip_list)
if args.dns:
dns_record(args.dns)
# if args.interactive:
# interactive_ip_reverse()
# if not args.file and not args.cidr:
# print("The argument requires the -f or -c")
# exit(1)
# if args.archive and not args.ip:
# print("The --archive argument requires the --ip")
# exit(1)
# if args.smart and not args.file:
# print("The --smart argument requires the -f or --file")
# exit(1)
# global THREADS
# THREADS=args.threads
# if args.ip:
# if args.file:
# if args.archive:
# # python3 iptool.py -f domain_list.txt --ip --archive
# with open(args.file, encoding="utf-8") as f:
# archive(f.readlines())
# else:
# # python3 iptool.py -f domain_list.txt --ip
# with open(args.file, encoding="utf-8") as f:
# for x,y in sync_getIP(f.readlines()):
# print(x,y)
# else:
# # python3 iptool.py -c www.baidu.com,www.qq.com --ip
# url_list=args.cidr.strip(',').split(',')
# for u in url_list:
# host,ip_list=getIP(u)
# print(host)
# for ip in ip_list:
# print(ip,ip_location(ip))
# elif args.file:
# if args.smart:
# # python3 iptool.py -f ip_or_CIDR_messy_list.txt
# smart(args.file)
# else:
# with open(args.file, encoding="utf-8") as f:
# ip_list=[i.strip() for i in f if i.strip() !='']
# # ip.sort()
# if args.location:
# # python3 iptool.py -f ip_or_CIDR_list.txt --location
# sync_ip_location(ipParse(ip_list)) # 异步处理
# else:
# for x in ipParse(ip_list):
# # python3 iptool.py -f ip_or_CIDR_list.txt
# print(x)
# elif args.cidr:
# ip_list=ipParse(args.cidr.strip(',').split(','))
# # python3 iptool.py -c 192.168.0.1/24 --location
# if args.location:
# sync_ip_location(ip_list) # 异步处理
# else:
# for x in ip_list:
# # python3 iptool.py -c 192.168.0.1/24
# print(x)
# else:
# print('Use -h to show help')
if __name__ == '__main__':
main()
| # gevent.s | identifier_name |
iptool.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author:Cyan
"""
Try copying the cluttered IP range contents below to the file and use:
python3 ipParse.py -f filename --smart
192.168.1.0 192.168.2.1/24,192.168.3.4-7,192.168.5.1-.192.168.5.34、192.176.34.6\26、192.187.34.2-67,192.111.111.111,192.168.5.1 - 192.168.5.34 192.168.5.1. -- 0192.168.5.34,192.168.5.1--192.168.5.34、1.2.4.5、192.168.5.5-9
192.168.5.1~192.168.5.34,192.168.5. 1 ~ 192.168.05.0 123.3.3.3. 192.168.5.1~56 192.168.7.1
"""
import requests
from gevent import monkey; monkey.patch_socket()
from gevent.pool import Pool
import gevent
import re
import argparse
import ipaddress
import json
import dns.resolver
import logging
import urllib
import socket
import sys
import os
import concurrent.futures
import tldextract
requests.packages.urllib3.disable_warnings()
REG_CD = re.compile(
r'(?P<cd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_SUBNET = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\/([0-9]|[1-2][0-9]|3[0-2])$')
REG_IP = re.compile(
r'((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){3}(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))$')
REG_IPRANGE = re.compile(
r'(?P<bd>((([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5]))\.){2})(?P<c1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d1>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))-(?P=bd)(?P<c2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))\.(?P<d2>(([1-9]?\d)|(1\d\d)|(2[0-4]\d)|(25[0-5])))$')
REG_Domain = re.compile(r'^([A-Za-z0-9]\.|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9]\.){1,3}[A-Za-z]{2,6}$')
def replSpace(rep):
return rep.group().replace(' ', '')
def replPoint(rep):
return rep.group().strip('.')
def replZero(rep):
return rep.group().lstrip('0')
# IPLIST = []
# save and deduplicate
# def save(ip):
# if ip not in IPLIST:
# IPLIST.append(ip)
# Handle the 192.168.1.1-192.168.2.128 form
def ipRange(item):
r=[]
res = REG_IPRANGE.match(item)
bd = res.group('bd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
d1 = int(res.group('d1'))
d2 = int(res.group('d2'))
if c1 == c2:
if d1 < d2:
for i in range(d1, d2 + 1):
r.append(bd + str(c1) + '.' + str(i))
else:
			print(f'\033[1;31mPlease check this IP: {item}\033[0m')
elif c1 < c2:
for c in range(c1, c2 + 1):
for d in range(d1, 255):
if c == c2 and d > d2:
break
else:
r.append(bd + str(c) + '.' + str(d))
d1 = 0
else:
		print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
# Handle the 192.168.2.1-243 form
def dealCd(item):
r=[]
res = REG_CD.match(item)
cd = res.group('cd')
c1 = int(res.group('c1'))
c2 = int(res.group('c2'))
if c1 < c2:
for i in range(c1, c2 + 1):
r.append(cd + str(i))
else:
		print(f'\033[1;31mPlease check this IP: {item}\033[0m')
return r
# Handle the 192.168.1.0/24 form
def dealSubnet(item):
r=[]
if int(re.match(r'.*/(\d+)',item).group(1))<=16:
print(f'too big range:{item}')
exit()
net = ipaddress.ip_network(item, strict=False)
for ip in net.hosts():
r.append(str(ip))
return r
# Dispatch each input form to the matching handler
def ipParse(iplist):
IPLIST=[]
for item in iplist:
# print(item)
if REG_IPRANGE.match(item): # 192.168.1.1-192.168.2.128
IPLIST.extend(ipRange(item))
elif REG_CD.match(item): # 192.168.2.1-243
IPLIST.extend(dealCd(item))
elif REG_SUBNET.match(item): # 192.168.2.1/24
IPLIST.extend(dealSubnet(item))
elif REG_IP.match(item):
IPLIST.append(item)
else:
			logging.info(f'\033[1;31mPlease check this IP: {item}\033[0m')
r = list(set(IPLIST))
r.sort(key=IPLIST.index)
return r
# Normalise a messy, unformatted file of IPs/ranges and print the expanded result
def format(ipfile):
with open(ipfile, encoding="utf-8") as f:
content = f.read()
logging.info("-" * 80)
	# 192.168.1.1 -- 254  replace nonstandard separators (e.g. ~~ ~ -- -) with a single -, and \ with /
	s1 = re.sub(r'\s*[-~]+\s*', '-', content).replace('\\','/').replace('"','').replace("'",'')
	# 123. 34 .123 . 123  remove stray spaces inside an address -- comment this line out if it causes errors
	s1 = re.sub(r'(\d+\s*\.\s*){3}\d+', replSpace, s1)
	# .123.123.123.123  strip dots mistakenly written at either end -- comment this line out if it causes errors
	s1 = re.sub(r'\.?(\d+\.*){3}\d+\.?', replPoint, s1)
	s1 = re.sub(r'\d{2,}', replZero, s1) # strip leading zeros from multi-digit octets, e.g. the 02 in 123.0.02.1
	s1 = re.split(r'[\n\s,,、;;]+', s1) # split on these separators into a list and deduplicate
s1 = list({x for x in s1 if x !=''})
s1.sort()
logging.info(s1)
logging.info("-" * 80)
for x in ipParse(s1):
print(x)
def dns_record(domain):
green = "\x1b[1;32m"
cyan = "\x1b[1;36m"
clear = "\x1b[0m"
record_type = ["A","AAAA","CNAME","NS","MX","TXT","SOA","PTR","SPF","SRV","AXFR","IXFR",
"MAILB","URI","HIP","A6","AFSDB","APL","CAA","CDNSKEY","CDS",
"CSYNC","DHCID","DLV","DNAME","DNSKEY","DS","EUI48","EUI64",
"MB","MD","MF","MG","MINFO","MR","NAPTR","NINFO","NSAP","NSEC",
"NSEC3","NSEC3PARAM","NULL","NXT","OPENPGPKEY","OPT","PX","RP",
"RRSIG","RT","SIG","SSHFP","TA","TKEY","TLSA","TSIG",
"GPOS","HINFO","IPSECKEY","ISDN","KEY","KX","LOC","MAILA",
"UNSPEC","WKS","X25","CERT","ATMA","DOA","EID","GID","L32",
"L64","LP","NB","NBSTAT","NID","NIMLOC","NSAP-PTR","RKEY",
"SINK","SMIMEA","SVCB","TALINK","UID","UINFO","ZONEMD","HTTPS"]
for rt in record_type:
try:
r = dns.resolver.resolve(domain, rt)
except Exception as e:
print(rt + "\t" + str(e))
# print(e)
else:
# print(rt)
for v in r:
print(
green + rt + clear + "\t" +
cyan + str(v) + clear)
def ip_location(ip):
# try:
# requests.get(f"https://www.sogou.com/reventondc/external?key={ip}&type=2&charset=utf8&objid=20099801&userarea=d123&uuid=6a3e3dd2-d0cb-440c-ac45-a62125dee188&p_ip=180.101.49.12&callback=sogouCallback1620961932681")
# except Exception as e:
# pass
# else:
# try:
# requests.get("https://open.onebox.so.com/dataApi?callback=jQuery18301409029392462775_1620962038263&type=ip&src=onebox&tpl=0&num=1&query=ip&ip=180.101.49.12&url=ip&_=1620962046570")
# except Exception as e:
# pass
# try:
# requests.get("https://apiquark.sm.cn/rest?method=sc.number_ip_new&request_sc=shortcut_searcher::number_ip_new&callback=sc_ip_search_callback&q=103.235.46.39&callback=jsonp2")
# except Exception as e:
# pass
# try:
# requests.get("https://so.toutiao.com/2/wap/search/extra/ip_query?ip=103.235.46.39")
# except Exception:
# pass
ip=ip.strip()
# print(ip)
try:
		resp=requests.get(f"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query={ip}&co=&resource_id=5809&t=1600743020566&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery110208008102506768224_1600742984815&_=1600742984816")
# print(resp.text)
except Exception as e:
# print(e)
return ip, "Error: "+str(e)
j = json.loads(resp.text[42:-1])
# print(j)
if len(j['Result'])!=0:
# print(j['Result'][0])
return ip, j['Result'][0]['DisplayData']['resultData']['tplData']['location']
else:
# print(f"INFO: {ip} {j}")
# print(j['Result'])
return ip, j['Result']
def ip_reverse(ip):
# https://www.threatcrowd.org/searchApi/v2/ip/report/?ip=
try:
resp=requests.get(f"https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}&__cf_chl_jschl_tk__=b23e1ebddba7a8afcec8002ebe8161982a307678-1600841853-0-AdBviI4eBSvsCtV19ogQiOgQh8BZDLUSjLLWlPxcUmToHHMVBUzRMOttXDt0rU_oBQ9sjEco0JVg1HpkyolfayL92SM2O7_7QPM67RLnKw6bB2HLrDSbAe1isBru5CZQMW37d1m5MI-3maLEyCwpAx5M5n3gjSTPATv6XUK6GYvSdIIflKHKr8NI1wjWqe6YHdsdGshphzA5RP9IINVQ_q3mRfxz7YbZiW49E3sduJLtQjiFB1IaGapMdW_HMt_qbw_jJo4S7j_w-ZnEVKTCBpwR5LVACjy3p2rv_lTL7Uw1zW1J84fJ--sTRfKa1iZlN1-eENeG293SoP0IIGM0l-c",
timeout=10,
cookies={"__cfduid":"d1f527bf2b91e30ae3e5edc6392e873091600248379","cf_clearance":"1d01f377dd9b8c5c7b76a488c7b4adbd3da6055a-1600841859-0-1zd74c2a3az56d45067z127237b9-150"},
headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"},
verify=False,
)
except Exception as e:
		print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return e
# print(resp.text)
try:
j=json.loads(resp.text)
except Exception as e:
		print(f"Please access manually: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return "Cloudflare DDos Detect!"
r=""
if j['response_code']!='0':
if len(j['resolutions'])>100:
j['resolutions']=j['resolutions'][:99]
for record in j['resolutions']:
r+=f"{record['last_resolved']}\t{record['domain']}\n"
return r[:-1]
else:
# print("Not Found!")
		return "No reverse information found!"
def interactive_ip_reverse():
"""
	Interactive loop for reverse IP lookups.
"""
while True:
ip=input("Input IP: ").strip()
if not re.match(r"^(\d{1,3}\.){3}\d{1,3}$",ip):
print("\"%s\" is not a valid IP!"%ip)
print("-"*100)
continue
jobs=[
# gevent.spawn(ip_location, ip),
gevent.spawn(ip_reverse, ip),
]
gevent.joinall(jobs)
for job in jobs:
print(job.value)
print("-"*100)
def extract_host(url):
url=url.strip()
if (not url.startswith("http") and not url.startswith("//")):
url="https://"+url
# print(urllib.parse.urlparse(url)[1])
return urllib.parse.urlparse(url)[1]
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = ['8.8.8.8']
def getIP(url):
host=extract_host(url)
try:
google_record=[rdata.address for rdata in my_resolver.resolve(host, 'A')]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
google_record=[]
try:
socket_record=socket.gethostbyname_ex(host)[2]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
socket_record=[]
# print(google_record,socket_record)
socket_record.extend([x for x in google_record if x not in socket_record])
# print(google_record,socket_record)
if len(socket_record) == 0:
print(f"\033[1;31m ERROR: {host} resolve error\033[0m")
return host,socket_record
def sync_getIP(url_list):
r=[]
p=Pool(THREADS)
threads=[p.spawn(getIP, i) for i in url_list]
gevent.joinall(threads)
for item in threads:
r.append(item.value)
return r
def getTLD(file):
tld_list=set()
with open(file,"r") as f:
for x in f:
if x.strip()!="":
tld = tldextract.extract(x).registered_domain
if tld!="":
tld_list.add(tld)
for x in tld_list:
print(x)
def archive(domain_list):
sigleIP={}
info_pool=[]
for host,ip_list in sync_getIP(domain_list):
info_pool.append((host,ip_list))
if len(ip_list)==1:
sigleIP[ip_list[0]]=[]
# for ip in sigleIP:
# print("### "+ip)
# for info in info_pool:
# if ip in info[2]:
| def main():
	Usage = """
single
# ip # show local ip
# ip 8.8.8.8 # show location && provider
# ip www.baidu.com # show ip and location
multi
# ip -c 8.8.8.8/24 [--location] # show cidr
# ip -f iplist.txt [--format] [--archive] [--tld] [--location] # list all ip
# ip -dns www.baidu.com # check dns
# ip --interactive # show domain or ip location
# ip --history 8.8.8.8 # show history domain TODO
"""
argvlen = len(sys.argv)
if argvlen == 1:
os.system("ifconfig -l | xargs -n1 ipconfig getifaddr")
return
if argvlen == 2:
if REG_IP.match(sys.argv[1]):
print("\t".join(ip_location(sys.argv[1])))
elif REG_Domain.match(sys.argv[1]):
host, ip_list = getIP(sys.argv[1])
print(host)
for ip in ip_list:
print("\t".join(ip_location(ip)))
else:
			print("please provide a valid domain or IP")
return
parser = argparse.ArgumentParser()
# ip_parser=parser.add_argument_group("For IP list")
# # parser.description = 'Parse IP range like 192.168.2.3/26 10.0.4.1-10.0.4.9 10.0.0.1-254'
group = parser.add_mutually_exclusive_group()
# domain_parser=parser.add_argument_group("For domain list")
# reverse_parser=parser.add_argument_group("Reverse IP")
group.add_argument("-f", '--file', help="The file containing a list of IPs or domains")
group.add_argument("-c", '--cidr', help="Command line read a domains,IP or CIDR like 192.168.2.3/26,10.0.0.1-254,10.0.4.1-10.0.4.9")
group.add_argument("-dns", '--dns', help="Show dns record of domain")
parser.add_argument('--location', action="store_true", help="The location of IP")
# parser.add_argument('-t', "--threads", type=int, default=20, help="Number of threads(default 20)")
parser.add_argument('--format', action="store_true", help="Automatic analysis of messy file containing IPs")
parser.add_argument('--tld', action="store_true", help="Show TLD of domain")
# domain_parser.add_argument('--ip', action="store_true", help="show IP of domain")
# reverse_parser.add_argument('--interactive', action="store_true", help="open an interactive to get domain history of IP")
# domain_parser.add_argument('--archive', action="store_true", help="Archive IP and domain")
args = parser.parse_args()
if args.cidr:
ip_list = ipParse(args.cidr.strip(',').split(','))
if args.location:
sync_ip_location(ip_list)
else:
print("\n".join(ip_list))
			logging.info(f'\033[0;36m{len(ip_list)} IPs in total\033[0m')
return
if args.file:
if args.format:
format(args.file)
return
if args.tld:
getTLD(args.file)
return
if args.location:
with open(args.file, encoding="utf-8") as f:
ip_list = f.readlines()
# print(ip_list)
sync_ip_location(ip_list)
if args.dns:
dns_record(args.dns)
# if args.interactive:
# interactive_ip_reverse()
# if not args.file and not args.cidr:
# print("The argument requires the -f or -c")
# exit(1)
# if args.archive and not args.ip:
# print("The --archive argument requires the --ip")
# exit(1)
# if args.smart and not args.file:
# print("The --smart argument requires the -f or --file")
# exit(1)
# global THREADS
# THREADS=args.threads
# if args.ip:
# if args.file:
# if args.archive:
# # python3 iptool.py -f domain_list.txt --ip --archive
# with open(args.file, encoding="utf-8") as f:
# archive(f.readlines())
# else:
# # python3 iptool.py -f domain_list.txt --ip
# with open(args.file, encoding="utf-8") as f:
# for x,y in sync_getIP(f.readlines()):
# print(x,y)
# else:
# # python3 iptool.py -c www.baidu.com,www.qq.com --ip
# url_list=args.cidr.strip(',').split(',')
# for u in url_list:
# host,ip_list=getIP(u)
# print(host)
# for ip in ip_list:
# print(ip,ip_location(ip))
# elif args.file:
# if args.smart:
# # python3 iptool.py -f ip_or_CIDR_messy_list.txt
# smart(args.file)
# else:
# with open(args.file, encoding="utf-8") as f:
# ip_list=[i.strip() for i in f if i.strip() !='']
# # ip.sort()
# if args.location:
# # python3 iptool.py -f ip_or_CIDR_list.txt --location
# sync_ip_location(ipParse(ip_list)) # 异步处理
# else:
# for x in ipParse(ip_list):
# # python3 iptool.py -f ip_or_CIDR_list.txt
# print(x)
# elif args.cidr:
# ip_list=ipParse(args.cidr.strip(',').split(','))
# # python3 iptool.py -c 192.168.0.1/24 --location
# if args.location:
# sync_ip_location(ip_list) # 异步处理
# else:
# for x in ip_list:
# # python3 iptool.py -c 192.168.0.1/24
# print(x)
# else:
# print('Use -h to show help')
if __name__ == '__main__':
main()
| # print(info[1])
for info in info_pool:
for ip in info[1]:
if ip in sigleIP.keys():
sigleIP[ip].append(info[0])
break
else:
print(info[0],info[1])
# print(sigleIP)
for i,v in sigleIP.items():
		print(f"### {i}\t{ip_location(i)[1]}")
for t in v:
print(t)
print("### Nmap")
print(f"sudo nmap -Pn -sS -sV -T3 -p1-65535 --open {' '.join([ip for ip in sigleIP.keys()])}")
def sync_ip_location(ip_list):
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for ip, located in executor.map(ip_location, ip_list):
print(ip, located)
THREADS=None
logging.basicConfig(format='%(message)s',
level=logging.INFO)
| identifier_body |
electron.js | import React, { Component } from 'react';
import Prism from 'prismjs';
import {
CodeBlock,
Section,
Link,
ComponentDescription,
SideScrollMenu,
PageTitle,
ComponentSubtitle,
CodeInline,
Helmet,
} from '@components';
const sections = [
{ name: 'Install' },
{ name: 'Setup' },
{ name: 'Usage' },
{ name: 'Examples' },
];
const dependencies = `npm i material-bread electron react react-dom react-native-web react-native-svg modal-enhanced-react-native-web @babel/core @babel/plugin-proposal-class-properties @babel/plugin-proposal-object-rest-spread @babel/plugin-transform-flow-strip-types @babel/plugin-transform-regenerator @babel/plugin-transform-runtime @babel/plugin-proposal-export-default-from css-loader file-loader style-loader webpack webpack-cli webpack-dev-server
`;
const code = `import React, { Component } from "react";
import Root from "./Root";
import { BreadProvider } from "material-bread";
export default class App extends Component {
render() {
return (
<BreadProvider>
<Root />
</BreadProvider>
);
}
}`;
const html = `<!DOCTYPE html>
<html>
<head>
<title>Material Bread Electron</title>
<meta charset="utf-8" />
</head>
<body>
<div id="app"></div>
<script
type="text/javascript"
src="http://localhost:7000/bundle.js"
></script>
</body>
</html>
`;
const mainJs = `const { app, BrowserWindow } = require("electron");
let win;
const createWindow = () => {
win = new BrowserWindow({
width: 800,
minWidth: 500,
height: 620,
minHeight: 500,
center: true,
show: false
});
	win.loadURL(\`file://\${__dirname}/index.html\`);
win.on("closed", () => {
win = null;
});
win.once("ready-to-show", () => {
win.show();
});
};
app.on("ready", createWindow);
app.on("window-all-closed", () => {
app.quit();
});
app.on("activate", () => {
if (win === null) {
createWindow();
}
});
`;
const rendererJs = `import React from "react";
import { render, unmountComponentAtNode } from "react-dom";
const root = document.getElementById("app");
const renderApp = () => {
const App = require("./App").default;
if (root) render(<App />, root);
};
renderApp();
if (module && module.hot != null && typeof module.hot.accept === "function") {
module.hot.accept(["./App"], () =>
setImmediate(() => {
unmountComponentAtNode(root);
renderApp();
})
);
}`;
const webpack = `const path = require("path");
module.exports = {
mode: "development",
entry: {
app: path.join(__dirname, "src", "renderer.js")
},
node: {
__filename: true,
__dirname: true
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules\/(?!(material-bread|react-native-vector-icons)\/).*/,
use: {
loader: "babel-loader",
options: {
presets: ["@babel/preset-env", "@babel/preset-react"],
plugins: [
"@babel/plugin-transform-flow-strip-types",
"@babel/plugin-proposal-class-properties",
"@babel/plugin-proposal-object-rest-spread",
"@babel/plugin-transform-runtime",
"@babel/plugin-transform-regenerator",
"@babel/plugin-proposal-export-default-from"
]
}
}
},
{
test: /\.html$/,
use: [
{
loader: "html-loader"
}
]
},
{
test: /\.css$/,
use: ["style-loader", "css-loader"]
},
{
test: /\.(png|woff|woff2|eot|ttf|svg)$/,
loader: "file-loader?limit=100000"
}
]
},
resolve: {
alias: {
"react-native": "react-native-web"
}
},
output: {
filename: "bundle.js"
},
target: "electron-renderer",
devServer: {
contentBase: path.join(__dirname, "src"),
port: 7000
}
};`;
const appjs = `import React, { Component } from "react";
import { View } from "react-native";
import { Fab } from "material-bread";
const materialFont = new FontFace(
"MaterialIcons",
"url(../node_modules/react-native-vector-icons/Fonts/MaterialIcons.ttf)"
);
document.fonts.add(materialFont);
class App extends Component {
render() {
return (
<View>
<Fab />
</View>
);
}
}
export default App;`;
const scripts = `"server": "webpack-dev-server --config ./webpack.config.js",
"electron": "electron ./src/main.js",
`;
class Index extends Component {
componentDidMount() |
render() {
return (
<div style={styles.container}>
<Helmet title={'React Native Electron'} />
<PageTitle>Electron</PageTitle>
<ComponentSubtitle
description={
'Build cross platform desktop apps with JavaScript, HTML, and CSS'
}
/>
<SideScrollMenu items={sections} />
<Section
name="Install"
id="install"
href="/getting-started/electron#install">
<div className="row">
<CodeBlock
code={'npm i material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
<div className="row">or</div>
<div className="row">
<CodeBlock
code={'yarn add material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
</Section>
<Section
name="Setup"
id="setup"
href="/getting-started/electron#setup ">
<ComponentDescription
text={
<div>
There are essentially three steps involved in getting Material
Bread working on Electron.
<ol>
<li>Set up React on Electron</li>
<li>Set up React-Web on Electron</li>
<li>Set up Material Bread and vector icons</li>
</ol>
The quickest and easiest way to get started is to check out the
example repo linked below. If you're familiar with setting up{' '}
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native-web" type="" /> with electron
then you can skip to the section about webpack config and{' '}
<CodeInline code="app.js" type="file" />.
</div>
}
/>
<div style={styles.subSection}>
<h3 style={styles.h3}>Install dependencies</h3>
<ComponentDescription
text={
<div>
This includes <CodeInline code="react" type="" />,{' '}
<CodeInline code="react-native" type="" />
, <CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, required babel
plugins, and webpack loaders.
</div>
}
/>
<CodeBlock
code={dependencies}
style={styles.code}
canCopy
small
fontSize={12}
/>
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>HTML entry</h3>
<ComponentDescription
text={
<div>
Create a src folder with{' '}
<CodeInline code="index.html" type="file" /> to act as an
entry point
</div>
}
/>
<CodeBlock code={html} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create main.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="main.js" type="file" /> file in src
that will create a window and load the{' '}
<CodeInline code="index.html" type="file" />
file.
</div>
}
/>
<CodeBlock code={mainJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create renderer.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="renderer.js" type="file" /> file in
src that will load react into the html file with hot
reloading.
</div>
}
/>
<CodeBlock code={rendererJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create webpack.config.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="webpack.config.js" type="file" />{' '}
file in the root of the project that will handle babel
plugins, loaders, electron-renderer, output our bundle, and
alias react-native.
</div>
}
/>
<CodeBlock code={webpack} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create App.js and add Icons</h3>
<ComponentDescription
text={
<div>
Create <CodeInline code="App.js " type="file" />
component in src. Add the FontFace function below to add the
material icons to the package.
</div>
}
/>
<CodeBlock code={appjs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Add scripts</h3>
<ComponentDescription
text={
<div>
Add webpack server script and electron server to{' '}
<CodeInline code="package.json" type="file" />.
</div>
}
/>
<CodeBlock code={scripts} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Finish</h3>
<ComponentDescription
text={
<div>
Finally open up two console tabs, run{' '}
<CodeInline code="npm run server" type="" />
in one and <CodeInline code="npm run electron" type="" /> in
the other. You should now see your app running with Material
Bread components. Keep in mind this is a very minimal setup;
there are plenty of other great guides to setting up{' '}
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native" type="" /> with{' '}
<CodeInline code="electron" type="" />.
</div>
}
/>
</div>
</Section>
<Section name="Usage" id="usage" href="/getting-started/electron#usage">
<ComponentDescription
text={
<div>
Simply wrap your app or root in the{' '}
<CodeInline code="BreadProvider" type="element" /> and start
developing. You can learn about customizing on the
<Link href="/style/theme"> theme page</Link>.
</div>
}
/>
<CodeBlock code={code} canCopy />
</Section>
<Section
name="Examples"
id="examples"
href="/getting-started/electron#examples">
<ComponentDescription
text={
<div>
For a quick start with a minimal setup using{' '}
<CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, and{' '}
<CodeInline code="material-bread" type="" />, check out the
example below
</div>
}
/>
<Link
href="https://github.com/codypearce/material-bread-electron-example"
style={{ fontSize: 18, whitespace: 'wrap' }}>
Minimal React Native Electron Example
</Link>
</Section>
</div>
);
}
}
const styles = {
container: { marginBottom: 60 },
code: {},
h3: {
fontWeight: 400,
marginBottom: 8,
},
subSection: {
marginTop: 40,
},
};
export default Index;
| {
Prism.highlightAll();
} | identifier_body |
electron.js | import React, { Component } from 'react';
import Prism from 'prismjs';
import {
CodeBlock,
Section,
Link,
ComponentDescription,
SideScrollMenu,
PageTitle,
ComponentSubtitle,
CodeInline,
Helmet,
} from '@components';
const sections = [
{ name: 'Install' },
{ name: 'Setup' },
{ name: 'Usage' },
{ name: 'Examples' },
];
const dependencies = `npm i material-bread electron react react-dom react-native-web react-native-svg modal-enhanced-react-native-web @babel/core @babel/plugin-proposal-class-properties @babel/plugin-proposal-object-rest-spread @babel/plugin-transform-flow-strip-types @babel/plugin-transform-regenerator @babel/plugin-transform-runtime @babel/plugin-proposal-export-default-from css-loader file-loader style-loader webpack webpack-cli webpack-dev-server
`;
const code = `import React, { Component } from "react";
import Root from "./Root";
import { BreadProvider } from "material-bread";
export default class App extends Component {
render() {
return (
<BreadProvider>
<Root />
</BreadProvider>
);
}
}`;
const html = `<!DOCTYPE html>
<html>
<head>
<title>Material Bread Electron</title>
<meta charset="utf-8" />
</head>
<body>
<div id="app"></div>
<script
type="text/javascript"
src="http://localhost:7000/bundle.js"
></script>
</body>
</html>
`;
const mainJs = `const { app, BrowserWindow } = require("electron");
let win;
const createWindow = () => {
win = new BrowserWindow({
width: 800,
minWidth: 500,
height: 620,
minHeight: 500,
center: true,
show: false
});
win.loadURL(\`file://${__dirname}/index.html\`);
win.on("closed", () => {
win = null;
});
win.once("ready-to-show", () => {
win.show();
});
};
app.on("ready", createWindow);
app.on("window-all-closed", () => {
app.quit();
});
app.on("activate", () => {
if (win === null) {
createWindow();
}
});
`;
const rendererJs = `import React from "react";
import { render, unmountComponentAtNode } from "react-dom";
const root = document.getElementById("app");
const renderApp = () => {
const App = require("./App").default;
if (root) render(<App />, root);
};
renderApp();
if (module && module.hot != null && typeof module.hot.accept === "function") {
module.hot.accept(["./App"], () =>
setImmediate(() => {
unmountComponentAtNode(root);
renderApp();
})
);
}`;
const webpack = `const path = require("path");
module.exports = {
mode: "development",
entry: {
app: path.join(__dirname, "src", "renderer.js")
},
node: {
__filename: true,
__dirname: true
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules\/(?!(material-bread|react-native-vector-icons)\/).*/,
use: {
loader: "babel-loader",
options: {
presets: ["@babel/preset-env", "@babel/preset-react"],
plugins: [
"@babel/plugin-transform-flow-strip-types",
"@babel/plugin-proposal-class-properties",
"@babel/plugin-proposal-object-rest-spread",
"@babel/plugin-transform-runtime",
"@babel/plugin-transform-regenerator",
"@babel/plugin-proposal-export-default-from"
]
}
}
},
{
test: /\.html$/,
use: [
{
loader: "html-loader"
}
]
},
{
test: /\.css$/,
use: ["style-loader", "css-loader"]
},
{
test: /\.(png|woff|woff2|eot|ttf|svg)$/,
loader: "file-loader?limit=100000"
}
]
},
resolve: {
alias: {
"react-native": "react-native-web"
}
},
output: {
filename: "bundle.js"
},
target: "electron-renderer",
devServer: {
contentBase: path.join(__dirname, "src"),
port: 7000
}
};`;
const appjs = `import React, { Component } from "react";
import { View } from "react-native";
import { Fab } from "material-bread";
const materialFont = new FontFace(
"MaterialIcons",
"url(../node_modules/react-native-vector-icons/Fonts/MaterialIcons.ttf)"
);
document.fonts.add(materialFont);
class App extends Component {
render() {
return (
<View>
<Fab />
</View>
);
}
}
export default App;`;
const scripts = `"server": "webpack-dev-server --config ./webpack.config.js",
"electron": "electron ./src/main.js",
`;
class Index extends Component {
| () {
Prism.highlightAll();
}
render() {
return (
<div style={styles.container}>
<Helmet title={'React Native Electron'} />
<PageTitle>Electron</PageTitle>
<ComponentSubtitle
description={
'Build cross platform desktop apps with JavaScript, HTML, and CSS'
}
/>
<SideScrollMenu items={sections} />
<Section
name="Install"
id="install"
href="/getting-started/electron#install">
<div className="row">
<CodeBlock
code={'npm i material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
<div className="row">or</div>
<div className="row">
<CodeBlock
code={'yarn add material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
</Section>
<Section
name="Setup"
id="setup"
href="/getting-started/electron#setup ">
<ComponentDescription
text={
<div>
There are essentially three steps involved in getting Material
Bread working on Electron.
<ol>
<li>Set up React on Electron</li>
<li>Set up React-Web on Electron</li>
<li>Set up Material Bread and vector icons</li>
</ol>
The quickest and easiest way to get started is to check out the
example repo linked below. If you're familiar with setting up{' '}
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native-web" type="" /> with electron
then you can skip to the section about webpack config and{' '}
<CodeInline code="app.js" type="file" />.
</div>
}
/>
<div style={styles.subSection}>
<h3 style={styles.h3}>Install dependencies</h3>
<ComponentDescription
text={
<div>
This includes <CodeInline code="react" type="" />,{' '}
<CodeInline code="react-native" type="" />
, <CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, required babel
plugins, and webpack loaders.
</div>
}
/>
<CodeBlock
code={dependencies}
style={styles.code}
canCopy
small
fontSize={12}
/>
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>HTML entry</h3>
<ComponentDescription
text={
<div>
Create a src folder with{' '}
<CodeInline code="index.html" type="file" /> to act as an
entry point
</div>
}
/>
<CodeBlock code={html} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create main.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="main.js" type="file" /> file in src
that will create a window and load the{' '}
<CodeInline code="index.html" type="file" />
file.
</div>
}
/>
<CodeBlock code={mainJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create renderer.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="renderer.js" type="file" /> file in
src that will load react into the html file with hot
reloading.
</div>
}
/>
<CodeBlock code={rendererJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create webpack.config.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="webpack.config.js" type="file" />{' '}
file in the root of the project that will handle babel
plugins, loaders, electron-renderer, output our bundle, and
alias react-native.
</div>
}
/>
<CodeBlock code={webpack} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create App.js and add Icons</h3>
<ComponentDescription
text={
<div>
Create <CodeInline code="App.js " type="file" />
component in src. Add the FontFace function below to add the
material icons to the package.
</div>
}
/>
<CodeBlock code={appjs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Add scripts</h3>
<ComponentDescription
text={
<div>
Add webpack server script and electron server to{' '}
<CodeInline code="package.json" type="file" />.
</div>
}
/>
<CodeBlock code={scripts} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Finish</h3>
<ComponentDescription
text={
<div>
Finally open up two console tabs, run{' '}
<CodeInline code="npm run server" type="" />
in one and <CodeInline code="npm run electron" type="" /> in
the other. You should now see your app running with Material
Bread components. Keep in mind this is a very minimal setup;
there are plenty of other great guides to setting up{' '}
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native" type="" /> with{' '}
<CodeInline code="electron" type="" />.
</div>
}
/>
</div>
</Section>
<Section name="Usage" id="usage" href="/getting-started/electron#usage">
<ComponentDescription
text={
<div>
Simply wrap your app or root in the{' '}
<CodeInline code="BreadProvider" type="element" /> and start
developing. You can learn about customizing on the
<Link href="/style/theme"> theme page</Link>.
</div>
}
/>
<CodeBlock code={code} canCopy />
</Section>
<Section
name="Examples"
id="examples"
href="/getting-started/electron#examples">
<ComponentDescription
text={
<div>
For a quick start with a minimal setup using{' '}
<CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, and{' '}
<CodeInline code="material-bread" type="" />, check out the
example below
</div>
}
/>
<Link
href="https://github.com/codypearce/material-bread-electron-example"
style={{ fontSize: 18, whitespace: 'wrap' }}>
Minimal React Native Electron Example
</Link>
</Section>
</div>
);
}
}
const styles = {
container: { marginBottom: 60 },
code: {},
h3: {
fontWeight: 400,
marginBottom: 8,
},
subSection: {
marginTop: 40,
},
};
export default Index;
| componentDidMount | identifier_name |
electron.js | import React, { Component } from 'react';
import Prism from 'prismjs';
import {
CodeBlock,
Section,
Link,
ComponentDescription,
SideScrollMenu,
PageTitle,
ComponentSubtitle,
CodeInline,
Helmet,
} from '@components';
const sections = [
{ name: 'Install' },
{ name: 'Setup' },
{ name: 'Usage' },
{ name: 'Examples' },
];
const dependencies = `npm i material-bread electron react react-dom react-native-web react-native-svg modal-enhanced-react-native-web @babel/core @babel/plugin-proposal-class-properties @babel/plugin-proposal-object-rest-spread @babel/plugin-transform-flow-strip-types @babel/plugin-transform-regenerator @babel/plugin-transform-runtime @babel/plugin-proposal-export-default-from css-loader file-loader style-loader webpack webpack-cli webpack-dev-server
`;
const code = `import React, { Component } from "react";
import Root from "./Root";
import { BreadProvider } from "material-bread";
export default class App extends Component {
render() {
return (
<BreadProvider>
<Root /> | </BreadProvider>
);
}
}`;
const html = `<!DOCTYPE html>
<html>
<head>
<title>Material Bread Electron</title>
<meta charset="utf-8" />
</head>
<body>
<div id="app"></div>
<script
type="text/javascript"
src="http://localhost:7000/bundle.js"
></script>
</body>
</html>
`;
const mainJs = `const { app, BrowserWindow } = require("electron");
let win;
const createWindow = () => {
win = new BrowserWindow({
width: 800,
minWidth: 500,
height: 620,
minHeight: 500,
center: true,
show: false
});
win.loadURL(\`file://${__dirname}/index.html\`);
win.on("closed", () => {
win = null;
});
win.once("ready-to-show", () => {
win.show();
});
};
app.on("ready", createWindow);
app.on("window-all-closed", () => {
app.quit();
});
app.on("activate", () => {
if (win === null) {
createWindow();
}
});
`;
const rendererJs = `import React from "react";
import { render, unmountComponentAtNode } from "react-dom";
const root = document.getElementById("app");
const renderApp = () => {
const App = require("./App").default;
if (root) render(<App />, root);
};
renderApp();
if (module && module.hot != null && typeof module.hot.accept === "function") {
module.hot.accept(["./App"], () =>
setImmediate(() => {
unmountComponentAtNode(root);
renderApp();
})
);
}`;
const webpack = `const path = require("path");
module.exports = {
mode: "development",
entry: {
app: path.join(__dirname, "src", "renderer.js")
},
node: {
__filename: true,
__dirname: true
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules\/(?!(material-bread|react-native-vector-icons)\/).*/,
use: {
loader: "babel-loader",
options: {
presets: ["@babel/preset-env", "@babel/preset-react"],
plugins: [
"@babel/plugin-transform-flow-strip-types",
"@babel/plugin-proposal-class-properties",
"@babel/plugin-proposal-object-rest-spread",
"@babel/plugin-transform-runtime",
"@babel/plugin-transform-regenerator",
"@babel/plugin-proposal-export-default-from"
]
}
}
},
{
test: /\.html$/,
use: [
{
loader: "html-loader"
}
]
},
{
test: /\.css$/,
use: ["style-loader", "css-loader"]
},
{
test: /\.(png|woff|woff2|eot|ttf|svg)$/,
loader: "file-loader?limit=100000"
}
]
},
resolve: {
alias: {
"react-native": "react-native-web"
}
},
output: {
filename: "bundle.js"
},
target: "electron-renderer",
devServer: {
contentBase: path.join(__dirname, "src"),
port: 7000
}
};`;
const appjs = `import React, { Component } from "react";
import { View } from "react-native";
import { Fab } from "material-bread";
const materialFont = new FontFace(
"MaterialIcons",
"url(../node_modules/react-native-vector-icons/Fonts/MaterialIcons.ttf)"
);
document.fonts.add(materialFont);
class App extends Component {
render() {
return (
<View>
<Fab />
</View>
);
}
}
export default App;`;
const scripts = `"server": "webpack-dev-server --config ./webpack.config.js",
"electron": "electron ./src/main.js",
`;
class Index extends Component {
componentDidMount() {
Prism.highlightAll();
}
render() {
return (
<div style={styles.container}>
<Helmet title={'React Native Electron'} />
<PageTitle>Electron</PageTitle>
<ComponentSubtitle
description={
'Build cross platform desktop apps with JavaScript, HTML, and CSS'
}
/>
<SideScrollMenu items={sections} />
<Section
name="Install"
id="install"
href="/getting-started/electron#install">
<div className="row">
<CodeBlock
code={'npm i material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
<div className="row">or</div>
<div className="row">
<CodeBlock
code={'yarn add material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
</Section>
<Section
name="Setup"
id="setup"
href="/getting-started/electron#setup ">
<ComponentDescription
text={
<div>
There are essentially three steps involved in getting Material
Bread working on Electron.
<ol>
<li>Set up React on Electron</li>
<li>Set up React-Web on Electron</li>
<li>Set up Material Bread and vector icons</li>
</ol>
The quickest and easiest way to get started is to check out the
example repo linked below. If you're familiar with setting up{' '}
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native-web" type="" /> with electron
then you can skip to the section about webpack config and{' '}
<CodeInline code="app.js" type="file" />.
</div>
}
/>
<div style={styles.subSection}>
<h3 style={styles.h3}>Install dependencies</h3>
<ComponentDescription
text={
<div>
This includes <CodeInline code="react" type="" />,{' '}
<CodeInline code="react-native" type="" />
, <CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, required babel
plugins, and webpack loaders.
</div>
}
/>
<CodeBlock
code={dependencies}
style={styles.code}
canCopy
small
fontSize={12}
/>
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>HTML entry</h3>
<ComponentDescription
text={
<div>
Create a src folder with{' '}
<CodeInline code="index.html" type="file" /> to act as an
entry point
</div>
}
/>
<CodeBlock code={html} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create main.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="main.js" type="file" /> file in src
that will create a window and load the{' '}
<CodeInline code="index.html" type="file" />
file.
</div>
}
/>
<CodeBlock code={mainJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create renderer.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="renderer.js" type="file" /> file in
src that will load react into the html file with hot
reloading.
</div>
}
/>
<CodeBlock code={rendererJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create webpack.config.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="webpack.config.js" type="file" />{' '}
file in the root of the project that will handle babel
plugins, loaders, electron-renderer, output our bundle, and
alias react-native.
</div>
}
/>
<CodeBlock code={webpack} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create App.js and add Icons</h3>
<ComponentDescription
text={
<div>
Create <CodeInline code="App.js " type="file" />
component in src. Add the FontFace function below to add the
material icons to the package.
</div>
}
/>
<CodeBlock code={appjs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Add scripts</h3>
<ComponentDescription
text={
<div>
Add webpack server script and electron server to{' '}
<CodeInline code="package.json" type="file" />.
</div>
}
/>
<CodeBlock code={scripts} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Finish</h3>
<ComponentDescription
text={
<div>
Finally open up two console tabs, run{' '}
<CodeInline code="npm run server" type="" />
in one and <CodeInline code="npm run electron" type="" /> in
the other. You should now see your app running with Material
Bread components. Keep in mind this is a very minimal setup;
there are plenty of other great guides to setting up{' '}
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native" type="" /> with{' '}
<CodeInline code="electron" type="" />.
</div>
}
/>
</div>
</Section>
<Section name="Usage" id="usage" href="/getting-started/electron#usage">
<ComponentDescription
text={
<div>
Simply wrap your app or root in the{' '}
<CodeInline code="BreadProvider" type="element" /> and start
developing. You can learn about customizing on the
<Link href="/style/theme"> theme page</Link>.
</div>
}
/>
<CodeBlock code={code} canCopy />
</Section>
<Section
name="Examples"
id="examples"
href="/getting-started/electron#examples">
<ComponentDescription
text={
<div>
For a quick start with a minimal setup using{' '}
<CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, and{' '}
<CodeInline code="material-bread" type="" />, check out the
example below
</div>
}
/>
<Link
href="https://github.com/codypearce/material-bread-electron-example"
style={{ fontSize: 18, whitespace: 'wrap' }}>
Minimal React Native Electron Example
</Link>
</Section>
</div>
);
}
}
const styles = {
container: { marginBottom: 60 },
code: {},
h3: {
fontWeight: 400,
marginBottom: 8,
},
subSection: {
marginTop: 40,
},
};
export default Index; | random_line_split |
|
nfa.rs | //! The structure for defining non-deterministic finite automata.
use crate::automata::alphabet;
use crate::automata::dfa::DFA;
use crate::automata::dfa::RuleExecutable;
use crate::automata::pattern::Pattern;
use crate::automata::state::State;
use crate::automata::state::Transition;
use crate::automata::state;
use crate::automata::symbol::Symbol;
use crate::data::matrix::Matrix;
use itertools::Itertools;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use crate::prelude::*;
// =========================================
// === Non-Deterministic Finite Automata ===
// =========================================
/// A state identifier based on a set of states.
///
/// This is used during the NFA -> DFA transformation, where multiple states can merge together due
/// to the collapsing of epsilon transitions.
type StateSetId = BTreeSet<state::Identifier>;
/// The definition of a [NFA](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton) for a
/// given set of symbols, states, and transitions (specifically a NFA with ε-moves).
///
/// A NFA is a finite state automaton that accepts or rejects a given sequence of symbols. In
/// contrast with a DFA, the NFA may transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 3 │ ----> │ 3 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
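// Usage sketch (illustrative only; `pattern` stands for some `Pattern` value built with the
// constructors from `crate::automata::pattern`, which are not shown here):
//
// let mut nfa = NFA::default();
// let start = nfa.new_state();
// let accept = nfa.new_pattern(start, &pattern); // accepting state for `pattern`
// let dfa = DFA::from(&nfa); // see the `From<&NFA> for DFA` impl below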
/// Computes the epsilon closure of every state (the state itself plus all states reachable
/// through epsilon links alone), used when merging states during the NFA -> DFA transformation.
/// The algorithm is based on the one shown [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len()) | for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority});
}
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat;
DFA{alphabet_segmentation,links,callbacks}
}
}
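// Conversion sketch (mirrors the tests below): any NFA built above can be pushed through this
// subset construction to obtain an equivalent DFA, e.g.
//
// let nfa = tests::newline(); // NFA accepting '\n'
// let dfa = DFA::from(&nfa);
// assert_eq!(dfa, dfa::tests::newline());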
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as_slice()),
}
}
/// NFA that accepts any number of spaces ' '.
pub fn spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![2]),
State::from(vec![(32..=32,3)]),
State::from(vec![4]),
State::from(vec![5,8]),
State::from(vec![6]),
State::from(vec![(32..=32,7)]),
State::from(vec![8]),
State::from(vec![5,9]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![0, 32, 33].as_slice()),
}
}
/// NFA that accepts one letter a..=z or many spaces ' '.
pub fn letter_and_spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1,3]),
State::from(vec![(97..=122,2)]),
State::from(vec![11]).named("group_0_rule_0"),
State::from(vec![4]),
State::from(vec![(32..=32,5)]),
State::from(vec![6]),
State::from(vec![7,10]),
State::from(vec![8]),
State::from(vec![(32..=32,9)]),
State::from(vec![10]),
State::from(vec![7,11]).named("group_0_rule_1"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![32, 33, 97, 123].as_slice()),
}
}
#[test]
fn test_to_dfa_newline() {
assert_eq!(DFA::from(&newline()),dfa::tests::newline());
}
#[test]
fn test_to_dfa_letter() {
assert_eq!(DFA::from(&letter()),dfa::tests::letter());
}
#[test]
fn test_to_dfa_spaces() {
assert_eq!(DFA::from(&spaces()),dfa::tests::spaces());
}
#[test]
fn test_to_dfa_letter_and_spaces() {
assert_eq!(DFA::from(&letter_and_spaces()),dfa::tests::letter_and_spaces());
}
#[bench]
fn bench_to_dfa_newline(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&newline()))
}
#[bench]
fn bench_to_dfa_letter(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter()))
}
#[bench]
fn bench_to_dfa_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&spaces()))
}
#[bench]
fn bench_to_dfa_letter_and_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter_and_spaces()))
}
}
| ;
| identifier_name |
nfa.rs | //! The structure for defining non-deterministic finite automata.
use crate::automata::alphabet;
use crate::automata::dfa::DFA;
use crate::automata::dfa::RuleExecutable;
use crate::automata::pattern::Pattern;
use crate::automata::state::State;
use crate::automata::state::Transition;
use crate::automata::state;
use crate::automata::symbol::Symbol;
use crate::data::matrix::Matrix;
use itertools::Itertools;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use crate::prelude::*;
// =========================================
// === Non-Deterministic Finite Automata ===
// =========================================
/// A state identifier based on a set of states.
///
/// This is used during the NFA -> DFA transformation, where multiple states can merge together due
/// to the collapsing of epsilon transitions.
type StateSetId = BTreeSet<state::Identifier>;
/// The definition of a [NFA](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton) for a
/// given set of symbols, states, and transitions (specifically a NFA with ε-moves).
///
/// A NFA is a finite state automaton that accepts or rejects a given sequence of symbols. In
/// contrast with a DFA, the NFA may transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 3 │ ----> │ 3 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
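// Usage sketch (illustrative only; `pattern` stands for some `Pattern` value built with the
// constructors from `crate::automata::pattern`, which are not shown here):
//
// let mut nfa = NFA::default();
// let start = nfa.new_state();
// let accept = nfa.new_pattern(start, &pattern); // accepting state for `pattern`
// let dfa = DFA::from(&nfa); // see the `From<&NFA> for DFA` impl below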
/// Computes the epsilon closure of every state (the state itself plus all states reachable
/// through epsilon links alone), used when merging states during the NFA -> DFA transformation.
/// The algorithm is based on the one shown [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len());
for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority});
}
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat;
DFA{alphabet_segmentation,links,callbacks}
}
}
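// Conversion sketch (mirrors the tests below): any NFA built above can be pushed through this
// subset construction to obtain an equivalent DFA, e.g.
//
// let nfa = tests::newline(); // NFA accepting '\n'
// let dfa = DFA::from(&nfa);
// assert_eq!(dfa, dfa::tests::newline());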
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as_slice()),
}
}
/// NFA that accepts any number of spaces ' '.
pub fn spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![2]),
State::from(vec![(32..=32,3)]),
State::from(vec![4]),
State::from(vec![5,8]),
State::from(vec![6]),
State::from(vec![(32..=32,7)]),
State::from(vec![8]),
State::from(vec![5,9]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![0, 32, 33].as_slice()),
}
}
/// NFA that accepts one letter a..=z or many spaces ' '.
pub fn letter_and_spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1,3]),
State::from(vec![(97..=122,2)]),
S | _letter() {
assert_eq!(DFA::from(&letter()),dfa::tests::letter());
}
#[test]
fn test_to_dfa_spaces() {
assert_eq!(DFA::from(&spaces()),dfa::tests::spaces());
}
#[test]
fn test_to_dfa_letter_and_spaces() {
assert_eq!(DFA::from(&letter_and_spaces()),dfa::tests::letter_and_spaces());
}
#[bench]
fn bench_to_dfa_newline(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&newline()))
}
#[bench]
fn bench_to_dfa_letter(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter()))
}
#[bench]
fn bench_to_dfa_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&spaces()))
}
#[bench]
fn bench_to_dfa_letter_and_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter_and_spaces()))
}
}
| tate::from(vec![11]).named("group_0_rule_0"),
State::from(vec![4]),
State::from(vec![(32..=32,5)]),
State::from(vec![6]),
State::from(vec![7,10]),
State::from(vec![8]),
State::from(vec![(32..=32,9)]),
State::from(vec![10]),
State::from(vec![7,11]).named("group_0_rule_1"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![32, 33, 97, 123].as_slice()),
}
}
#[test]
fn test_to_dfa_newline() {
assert_eq!(DFA::from(&newline()),dfa::tests::newline());
}
#[test]
fn test_to_dfa | identifier_body |
nfa.rs | //! The structure for defining non-deterministic finite automata.
use crate::automata::alphabet;
use crate::automata::dfa::DFA;
use crate::automata::dfa::RuleExecutable;
use crate::automata::pattern::Pattern;
use crate::automata::state::State;
use crate::automata::state::Transition;
use crate::automata::state;
use crate::automata::symbol::Symbol;
use crate::data::matrix::Matrix;
use itertools::Itertools;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use crate::prelude::*;
// =========================================
// === Non-Deterministic Finite Automata ===
// =========================================
/// A state identifier based on a set of states.
///
/// This is used during the NFA -> DFA transformation, where multiple states can merge together due
/// to the collapsing of epsilon transitions.
type StateSetId = BTreeSet<state::Identifier>;
/// The definition of a [NFA](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton) for a
/// given set of symbols, states, and transitions (specifically a NFA with ε-moves).
///
/// A NFA is a finite state automaton that accepts or rejects a given sequence of symbols. In
/// contrast with a DFA, the NFA may transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 3 │ ----> │ 3 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
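// Usage sketch (illustrative only; `pattern` stands for some `Pattern` value built with the
// constructors from `crate::automata::pattern`, which are not shown here):
//
// let mut nfa = NFA::default();
// let start = nfa.new_state();
// let accept = nfa.new_pattern(start, &pattern); // accepting state for `pattern`
// let dfa = DFA::from(&nfa); // see the `From<&NFA> for DFA` impl below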
/// Computes the epsilon closure of every state (the state itself plus all states reachable
/// through epsilon links alone), used when merging states during the NFA -> DFA transformation.
/// The algorithm is based on the one shown [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
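// Worked example (hypothetical 3-state NFA): with epsilon links 0 -> 1 and 1 -> 2,
// `eps_matrix` yields the closures {0, 1, 2}, {1, 2} and {2}; each state's closure contains
// the state itself plus every state reachable through epsilon links alone.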
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len());
for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority}); |
DFA{alphabet_segmentation,links,callbacks}
}
}
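// Conversion sketch (mirrors the tests below): any NFA built above can be pushed through this
// subset construction to obtain an equivalent DFA, e.g.
//
// let nfa = tests::newline(); // NFA accepting '\n'
// let dfa = DFA::from(&nfa);
// assert_eq!(dfa, dfa::tests::newline());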
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as_slice()),
}
}
/// NFA that accepts any number of spaces ' '.
pub fn spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![2]),
State::from(vec![(32..=32,3)]),
State::from(vec![4]),
State::from(vec![5,8]),
State::from(vec![6]),
State::from(vec![(32..=32,7)]),
State::from(vec![8]),
State::from(vec![5,9]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![0, 32, 33].as_slice()),
}
}
/// NFA that accepts one letter a..=z or many spaces ' '.
pub fn letter_and_spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1,3]),
State::from(vec![(97..=122,2)]),
State::from(vec![11]).named("group_0_rule_0"),
State::from(vec![4]),
State::from(vec![(32..=32,5)]),
State::from(vec![6]),
State::from(vec![7,10]),
State::from(vec![8]),
State::from(vec![(32..=32,9)]),
State::from(vec![10]),
State::from(vec![7,11]).named("group_0_rule_1"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![32, 33, 97, 123].as_slice()),
}
}
#[test]
fn test_to_dfa_newline() {
assert_eq!(DFA::from(&newline()),dfa::tests::newline());
}
#[test]
fn test_to_dfa_letter() {
assert_eq!(DFA::from(&letter()),dfa::tests::letter());
}
#[test]
fn test_to_dfa_spaces() {
assert_eq!(DFA::from(&spaces()),dfa::tests::spaces());
}
#[test]
fn test_to_dfa_letter_and_spaces() {
assert_eq!(DFA::from(&letter_and_spaces()),dfa::tests::letter_and_spaces());
}
#[bench]
fn bench_to_dfa_newline(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&newline()))
}
#[bench]
fn bench_to_dfa_letter(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter()))
}
#[bench]
fn bench_to_dfa_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&spaces()))
}
#[bench]
fn bench_to_dfa_letter_and_spaces(bencher:&mut Bencher) {
bencher.iter(|| DFA::from(&letter_and_spaces()))
}
} | }
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat; | random_line_split |
compressed_arith.go | package sparse
import (
"github.com/james-bowman/sparse/blas"
"gonum.org/v1/gonum/mat"
)
// MulMatRawVec computes the matrix vector product between lhs and rhs and stores
// the result in out
func MulMatRawVec(lhs *CSR, rhs []float64, out []float64) {
m, n := lhs.Dims()
if len(rhs) != n {
panic(mat.ErrShape)
}
if len(out) != m {
panic(mat.ErrShape)
}
blas.Dusmv(false, 1, lhs.RawMatrix(), rhs, 1, out, 1)
}
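// Usage sketch (assumes lhs is an existing m x n *CSR constructed elsewhere):
//
// rhs := make([]float64, n) // dense input vector of length n
// out := make([]float64, m) // result vector; Dusmv accumulates lhs*rhs into out
// MulMatRawVec(lhs, rhs, out)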
// temporaryWorkspace returns a new CSR matrix w with the size of r x c with
// initial capacity allocated for nnz non-zero elements and
// returns a callback to defer which performs cleanup at the return of the call.
// This should be used when a method receiver is the same pointer as an input argument.
func (c *CSR) temporaryWorkspace(row, col, nnz int, clear bool) (w *CSR, restore func()) {
w = getWorkspace(row, col, nnz, clear)
return w, func() {
c.cloneCSR(w)
putWorkspace(w)
}
}
// spalloc ensures appropriate storage is allocated for the receiver sparse matrix
// ensuring it is row * col dimensions and checking for any overlap or aliasing
// between operands a or b with c in which case a temporary isolated workspace is
// allocated and the returned value isTemp is true with restore representing a
// function to clean up and restore the workspace once finished.
func (c *CSR) spalloc(a mat.Matrix, b mat.Matrix) (m *CSR, isTemp bool, restore func()) {
var nnz int
m = c
row, _ := a.Dims()
_, col := b.Dims()
lSp, lIsSp := a.(Sparser)
rSp, rIsSp := b.(Sparser)
if lIsSp && rIsSp {
nnz = lSp.NNZ() + rSp.NNZ()
} else {
// assume 10% of elements will be non-zero
nnz = row * col / 10
}
if c.checkOverlap(a) || c.checkOverlap(b) {
if !c.IsZero() && (row != c.matrix.I || col != c.matrix.J) {
panic(mat.ErrShape)
}
m, restore = c.temporaryWorkspace(row, col, nnz, true)
isTemp = true
} else {
c.reuseAs(row, col, nnz, true)
}
return
}
// Mul takes the matrix product of the supplied matrices a and b and stores the result
// in the receiver. Some specific optimisations are available for operands of certain
// sparse formats e.g. CSR * CSR uses Gustavson Algorithm (ACM 1978) for fast
// sparse matrix multiplication.
// If the number of columns in a does not equal the number of rows in b, Mul will panic.
func (c *CSR) Mul(a, b mat.Matrix) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ac != br {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lhs, isLCsr := a.(*CSR)
rhs, isRCsr := b.(*CSR)
if isLCsr && isRCsr {
// handle CSR * CSR
c.mulCSRCSR(lhs, rhs)
return
}
if dia, ok := a.(*DIA); ok {
if isRCsr {
// handle DIA * CSR
c.mulDIACSR(dia, rhs, false)
return
}
// handle DIA * mat.Matrix
c.mulDIAMat(dia, b, false)
return
}
if dia, ok := b.(*DIA); ok {
if isLCsr {
// handle CSR * DIA
c.mulDIACSR(dia, lhs, true)
return
}
// handle mat.Matrix * DIA
c.mulDIAMat(dia, a, true)
return
}
// TODO: handle cases where both matrices are DIA
srcA, isLSparse := a.(TypeConverter)
srcB, isRSparse := b.(TypeConverter)
if isLSparse {
if isRSparse {
// handle Sparser * Sparser
c.mulCSRCSR(srcA.ToCSR(), srcB.ToCSR())
return
}
// handle Sparser * mat.Matrix
c.mulCSRMat(srcA.ToCSR(), b)
return
}
if isRSparse {
// handle mat.Matrix * Sparser
w := getWorkspace(bc, ar, bc*ar/10, true)
bt := srcB.ToCSC().T().(*CSR)
w.mulCSRMat(bt, a.T())
c.Clone(w.T())
putWorkspace(w)
return
}
// handle mat.Matrix * mat.Matrix
row := getFloats(ac, false)
defer putFloats(row)
var v float64
for i := 0; i < ar; i++ {
for ci := range row {
row[ci] = a.At(i, ci)
}
for j := 0; j < bc; j++ {
v = 0
for ci, e := range row {
if e != 0 {
v += e * b.At(ci, j)
}
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
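// Usage sketch (a and b are any mat.Matrix operands with compatible shapes; a zero-value
// receiver is assumed to be acceptable here, as suggested by the IsZero check in spalloc):
//
// var c CSR
// c.Mul(a, b) // CSR*CSR operands dispatch to the Gustavson kernel below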
// mulCSRCSR handles CSR = CSR * CSR using Gustavson Algorithm (ACM 1978)
func (c *CSR) mulCSRCSR(lhs *CSR, rhs *CSR) {
ar, _ := lhs.Dims()
_, bc := rhs.Dims()
spa := NewSPA(bc)
// rows in C
for i := 0; i < ar; i++ {
// each element t in row i of A
for t := lhs.matrix.Indptr[i]; t < lhs.matrix.Indptr[i+1]; t++ {
begin := rhs.matrix.Indptr[lhs.matrix.Ind[t]]
end := rhs.matrix.Indptr[lhs.matrix.Ind[t]+1]
spa.Scatter(rhs.matrix.Data[begin:end], rhs.matrix.Ind[begin:end], lhs.matrix.Data[t], &c.matrix.Ind)
}
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRMat handles CSR = CSR * mat.Matrix
func (c *CSR) mulCSRMat(lhs *CSR, b mat.Matrix) {
ar, _ := lhs.Dims()
_, bc := b.Dims()
// handle case where matrix A is CSR (matrix B can be any implementation of mat.Matrix)
for i := 0; i < ar; i++ {
for j := 0; j < bc; j++ {
var v float64
// TODO Consider converting all Sparser args to CSR
for k := lhs.matrix.Indptr[i]; k < lhs.matrix.Indptr[i+1]; k++ {
v += lhs.matrix.Data[k] * b.At(lhs.matrix.Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if i < len(diagonal) {
v = other.At(i, k) * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// Sub subtracts matrix b from a and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Sub(a, b mat.Matrix) {
c.addScaled(a, b, 1, -1)
}
// Add adds matrices a and b together and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Add(a, b mat.Matrix) {
c.addScaled(a, b, 1, 1)
}
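// Example (editor's sketch, not part of the original source): element-wise
// combination through Add and Sub. Any mat.Matrix operands work, including the
// sparse types in this package; dense matrices are used here only to keep the
// sketch self-contained. The zero value receiver assumption is the same as in
// the Mul example above.
func exampleCSRAddSub() {
a := mat.NewDense(2, 2, []float64{1, 0, 0, 2})
b := mat.NewDense(2, 2, []float64{0, 3, 0, 4})
var sum, diff CSR
sum.Add(a, b)  // [[1 3] [0 6]]
diff.Sub(a, b) // [[1 -3] [0 -2]]
}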
// addScaled adds matrices a and b, scaling them by alpha and beta respectively beforehand.
func (c *CSR) addScaled(a mat.Matrix, b mat.Matrix, alpha float64, beta float64) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ar != br || ac != bc {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lCsr, lIsCsr := a.(*CSR)
rCsr, rIsCsr := b.(*CSR)
// TODO optimisation for DIA matrices
if lIsCsr && rIsCsr {
c.addCSRCSR(lCsr, rCsr, alpha, beta)
return
}
if lIsCsr {
c.addCSR(lCsr, b, alpha, beta)
return
}
if rIsCsr {
c.addCSR(rCsr, a, beta, alpha)
return
}
// dumb addition with no sparsity optimisations/savings
for i := 0; i < ar; i++ {
for j := 0; j < ac; j++ {
v := alpha*a.At(i, j) + beta*b.At(i, j)
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// addCSR adds a CSR matrix to any implementation of mat.Matrix and stores the
// result in the receiver.
func (c *CSR) addCSR(csr *CSR, other mat.Matrix, alpha float64, beta float64) {
ar, ac := csr.Dims()
spa := NewSPA(ac)
a := csr.RawMatrix()
if dense, isDense := other.(mat.RawMatrixer); isDense {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
rawOther := dense.RawMatrix()
r := rawOther.Data[i*rawOther.Stride : i*rawOther.Stride+rawOther.Cols]
spa.AccumulateDense(r, beta, &c.matrix.Ind)
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
for j := 0; j < ac; j++ {
v := other.At(i, j)
if v != 0 {
spa.ScatterValue(v, j, beta, &c.matrix.Ind)
}
}
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// addCSRCSR adds 2 CSR matrices together storing the result in the receiver.
// Matrices a and b are scaled by alpha and beta respectively before addition.
// This method is specially optimised to take advantage of the sparsity patterns
// of the 2 CSR matrices.
func (c *CSR) addCSRCSR(lhs *CSR, rhs *CSR, alpha float64, beta float64) {
ar, ac := lhs.Dims()
a := lhs.RawMatrix()
b := rhs.RawMatrix()
spa := NewSPA(ac)
var begin, end int
for i := 0; i < ar; i++ {
begin, end = a.Indptr[i], a.Indptr[i+1]
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
begin, end = b.Indptr[i], b.Indptr[i+1]
spa.Scatter(b.Data[begin:end], b.Ind[begin:end], beta, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// SPA is a SParse Accumulator used to construct the results of sparse
// arithmetic operations in linear time.
type SPA struct {
// w contains flags for indices containing non-zero values
w []int
// y contains all the values in dense representation (including zero values)
y []float64
// nnz is the Number of Non-Zero elements
nnz int
// generation is used to compare values of w to see if they have been set
// in the current row (generation). This avoids needing to reset all values
// during the GatherAndZero operation at the end of
// construction for each row/column vector.
generation int
}
// NewSPA creates a new SParse Accumulator of length n. If accumulating
// rows for a CSR matrix then n should be equal to the number of columns
// in the resulting matrix.
func | (n int) *SPA {
return &SPA{
w: make([]int, n),
y: make([]float64, n),
}
}
// ScatterVec accumulates the sparse vector x by multiplying the elements
// by alpha and adding them to the corresponding elements in the SPA
// (SPA += alpha * x)
func (s *SPA) ScatterVec(x *Vector, alpha float64, ind *[]int) {
s.Scatter(x.data, x.ind, alpha, ind)
}
// Scatter accumulates the sparse vector x by multiplying the elements by
// alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
func (s *SPA) Scatter(x []float64, indx []int, alpha float64, ind *[]int) {
for i, index := range indx {
s.ScatterValue(x[i], index, alpha, ind)
}
}
// ScatterValue accumulates a single value by multiplying the value by alpha
// and adding it to the corresponding element in the SPA (SPA += alpha * x)
func (s *SPA) ScatterValue(val float64, index int, alpha float64, ind *[]int) {
if s.w[index] < s.generation+1 {
s.w[index] = s.generation + 1
*ind = append(*ind, index)
s.y[index] = alpha * val
} else {
s.y[index] += alpha * val
}
}
// AccumulateDense accumulates the dense vector x by multiplying the non-zero elements
// by alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
// This is the dense version of the Scatter method for sparse vectors.
func (s *SPA) AccumulateDense(x []float64, alpha float64, ind *[]int) {
for i, val := range x {
if val != 0 {
s.ScatterValue(val, i, alpha, ind)
}
}
}
// Gather gathers the non-zero values from the SPA and appends them to
// end of the supplied sparse vector.
func (s SPA) Gather(data *[]float64, ind *[]int) {
for _, index := range (*ind)[s.nnz:] {
*data = append(*data, s.y[index])
//y[index] = 0
}
}
// GatherAndZero gathers the non-zero values from the SPA and appends them
// to the end of the supplied sparse vector. The SPA is also zeroed
// ready to start accumulating the next row/column vector.
func (s *SPA) GatherAndZero(data *[]float64, ind *[]int) {
s.Gather(data, ind)
s.nnz = len(*ind)
s.generation++
}
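// Example (editor's sketch, not part of the original source): accumulating a
// single sparse row with the SPA, using only the methods defined above. The
// ind and data slices play the role of the Ind/Data arrays that the CSR
// methods pass in.
func exampleSPARow() {
spa := NewSPA(4)
var ind []int
var data []float64
// row += 2 * (values {1, 3} at indices {0, 2})
spa.Scatter([]float64{1, 3}, []int{0, 2}, 2, &ind)
// row += 1 * value 5 at index 2
spa.ScatterValue(5, 2, 1, &ind)
spa.GatherAndZero(&data, &ind)
// ind is now [0 2] and data is [2 11]
}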
| NewSPA | identifier_name |
compressed_arith.go | package sparse
import (
"github.com/james-bowman/sparse/blas"
"gonum.org/v1/gonum/mat"
)
// MulMatRawVec computes the matrix vector product between lhs and rhs and stores
// the result in out
func MulMatRawVec(lhs *CSR, rhs []float64, out []float64) {
m, n := lhs.Dims()
if len(rhs) != n {
panic(mat.ErrShape)
}
if len(out) != m {
panic(mat.ErrShape)
}
blas.Dusmv(false, 1, lhs.RawMatrix(), rhs, 1, out, 1)
}
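// Example (editor's sketch, not part of the original source): the call shape of
// MulMatRawVec. The CSR operand is taken as a parameter so no constructor is
// assumed; note that blas.Dusmv accumulates into out rather than overwriting it.
func exampleMulMatRawVec(m *CSR) {
rows, cols := m.Dims()
rhs := make([]float64, cols) // length must equal the number of columns
out := make([]float64, rows) // length must equal the number of rows
MulMatRawVec(m, rhs, out)    // out += m * rhs
}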
// temporaryWorkspace returns a new CSR matrix w with the size of r x c with
// initial capacity allocated for nnz non-zero elements and
// returns a callback to defer which performs cleanup at the return of the call.
// This should be used when a method receiver is the same pointer as an input argument.
func (c *CSR) temporaryWorkspace(row, col, nnz int, clear bool) (w *CSR, restore func()) {
w = getWorkspace(row, col, nnz, clear)
return w, func() {
c.cloneCSR(w)
putWorkspace(w)
}
}
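// Editor's note (not part of the original source): the workspace/restore pair
// above is what makes calls where the receiver aliases an operand safe, e.g.
// c.Mul(c, b). The calling pattern used by the arithmetic methods below is:
//
// if m, temp, restore := c.spalloc(a, b); temp {
// defer restore() // copies the temporary result back into c on return
// c = m           // all writes below go to the isolated workspace
// }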
// spalloc ensures appropriate storage is allocated for the receiver sparse matrix
// ensuring it is row * col dimensions and checking for any overlap or aliasing
// between operands a or b with c in which case a temporary isolated workspace is
// allocated and the returned value isTemp is true with restore representing a
// function to clean up and restore the workspace once finished.
func (c *CSR) spalloc(a mat.Matrix, b mat.Matrix) (m *CSR, isTemp bool, restore func()) {
var nnz int
m = c
row, _ := a.Dims()
_, col := b.Dims()
lSp, lIsSp := a.(Sparser)
rSp, rIsSp := b.(Sparser)
if lIsSp && rIsSp {
nnz = lSp.NNZ() + rSp.NNZ()
} else {
// assume 10% of elements will be non-zero
nnz = row * col / 10
}
if c.checkOverlap(a) || c.checkOverlap(b) {
if !c.IsZero() && (row != c.matrix.I || col != c.matrix.J) {
panic(mat.ErrShape)
}
m, restore = c.temporaryWorkspace(row, col, nnz, true)
isTemp = true
} else {
c.reuseAs(row, col, nnz, true)
}
return
}
// Mul takes the matrix product of the supplied matrices a and b and stores the result
// in the receiver. Some specific optimisations are available for operands of certain
// sparse formats e.g. CSR * CSR uses Gustavson Algorithm (ACM 1978) for fast
// sparse matrix multiplication.
// If the number of columns in a does not equal the number of rows in b, Mul will panic.
func (c *CSR) Mul(a, b mat.Matrix) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ac != br {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp |
lhs, isLCsr := a.(*CSR)
rhs, isRCsr := b.(*CSR)
if isLCsr && isRCsr {
// handle CSR * CSR
c.mulCSRCSR(lhs, rhs)
return
}
if dia, ok := a.(*DIA); ok {
if isRCsr {
// handle DIA * CSR
c.mulDIACSR(dia, rhs, false)
return
}
// handle DIA * mat.Matrix
c.mulDIAMat(dia, b, false)
return
}
if dia, ok := b.(*DIA); ok {
if isLCsr {
// handle CSR * DIA
c.mulDIACSR(dia, lhs, true)
return
}
// handle mat.Matrix * DIA
c.mulDIAMat(dia, a, true)
return
}
// TODO: handle cases where both matrices are DIA
srcA, isLSparse := a.(TypeConverter)
srcB, isRSparse := b.(TypeConverter)
if isLSparse {
if isRSparse {
// handle Sparser * Sparser
c.mulCSRCSR(srcA.ToCSR(), srcB.ToCSR())
return
}
// handle Sparser * mat.Matrix
c.mulCSRMat(srcA.ToCSR(), b)
return
}
if isRSparse {
// handle mat.Matrix * Sparser
w := getWorkspace(bc, ar, bc*ar/10, true)
bt := srcB.ToCSC().T().(*CSR)
w.mulCSRMat(bt, a.T())
c.Clone(w.T())
putWorkspace(w)
return
}
// handle mat.Matrix * mat.Matrix
row := getFloats(ac, false)
defer putFloats(row)
var v float64
for i := 0; i < ar; i++ {
for ci := range row {
row[ci] = a.At(i, ci)
}
for j := 0; j < bc; j++ {
v = 0
for ci, e := range row {
if e != 0 {
v += e * b.At(ci, j)
}
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRCSR handles CSR = CSR * CSR using Gustavson Algorithm (ACM 1978)
func (c *CSR) mulCSRCSR(lhs *CSR, rhs *CSR) {
ar, _ := lhs.Dims()
_, bc := rhs.Dims()
spa := NewSPA(bc)
// rows in C
for i := 0; i < ar; i++ {
// each element t in row i of A
for t := lhs.matrix.Indptr[i]; t < lhs.matrix.Indptr[i+1]; t++ {
begin := rhs.matrix.Indptr[lhs.matrix.Ind[t]]
end := rhs.matrix.Indptr[lhs.matrix.Ind[t]+1]
spa.Scatter(rhs.matrix.Data[begin:end], rhs.matrix.Ind[begin:end], lhs.matrix.Data[t], &c.matrix.Ind)
}
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRMat handles CSR = CSR * mat.Matrix
func (c *CSR) mulCSRMat(lhs *CSR, b mat.Matrix) {
ar, _ := lhs.Dims()
_, bc := b.Dims()
// handle case where matrix A is CSR (matrix B can be any implementation of mat.Matrix)
for i := 0; i < ar; i++ {
for j := 0; j < bc; j++ {
var v float64
// TODO Consider converting all Sparser args to CSR
for k := lhs.matrix.Indptr[i]; k < lhs.matrix.Indptr[i+1]; k++ {
v += lhs.matrix.Data[k] * b.At(lhs.matrix.Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if i < len(diagonal) {
v = other.At(i, k) * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// Sub subtracts matrix b from a and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Sub(a, b mat.Matrix) {
c.addScaled(a, b, 1, -1)
}
// Add adds matrices a and b together and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Add(a, b mat.Matrix) {
c.addScaled(a, b, 1, 1)
}
// addScaled adds matrices a and b, scaling them by alpha and beta respectively beforehand.
func (c *CSR) addScaled(a mat.Matrix, b mat.Matrix, alpha float64, beta float64) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ar != br || ac != bc {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lCsr, lIsCsr := a.(*CSR)
rCsr, rIsCsr := b.(*CSR)
// TODO optimisation for DIA matrices
if lIsCsr && rIsCsr {
c.addCSRCSR(lCsr, rCsr, alpha, beta)
return
}
if lIsCsr {
c.addCSR(lCsr, b, alpha, beta)
return
}
if rIsCsr {
c.addCSR(rCsr, a, beta, alpha)
return
}
// dumb addition with no sparsity optimisations/savings
for i := 0; i < ar; i++ {
for j := 0; j < ac; j++ {
v := alpha*a.At(i, j) + beta*b.At(i, j)
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// addCSR adds a CSR matrix to any implementation of mat.Matrix and stores the
// result in the receiver.
func (c *CSR) addCSR(csr *CSR, other mat.Matrix, alpha float64, beta float64) {
ar, ac := csr.Dims()
spa := NewSPA(ac)
a := csr.RawMatrix()
if dense, isDense := other.(mat.RawMatrixer); isDense {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
rawOther := dense.RawMatrix()
r := rawOther.Data[i*rawOther.Stride : i*rawOther.Stride+rawOther.Cols]
spa.AccumulateDense(r, beta, &c.matrix.Ind)
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
for j := 0; j < ac; j++ {
v := other.At(i, j)
if v != 0 {
spa.ScatterValue(v, j, beta, &c.matrix.Ind)
}
}
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// addCSRCSR adds 2 CSR matrices together storing the result in the receiver.
// Matrices a and b are scaled by alpha and beta respectively before addition.
// This method is specially optimised to take advantage of the sparsity patterns
// of the 2 CSR matrices.
func (c *CSR) addCSRCSR(lhs *CSR, rhs *CSR, alpha float64, beta float64) {
ar, ac := lhs.Dims()
a := lhs.RawMatrix()
b := rhs.RawMatrix()
spa := NewSPA(ac)
var begin, end int
for i := 0; i < ar; i++ {
begin, end = a.Indptr[i], a.Indptr[i+1]
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
begin, end = b.Indptr[i], b.Indptr[i+1]
spa.Scatter(b.Data[begin:end], b.Ind[begin:end], beta, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// SPA is a SParse Accumulator used to construct the results of sparse
// arithmetic operations in linear time.
type SPA struct {
// w contains flags for indices containing non-zero values
w []int
// y contains all the values in dense representation (including zero values)
y []float64
// nnz is the Number of Non-Zero elements
nnz int
// generation is used to compare values of w to see if they have been set
// in the current row (generation). This avoids needing to reset all values
// during the GatherAndZero operation at the end of
// construction for each row/column vector.
generation int
}
// NewSPA creates a new SParse Accumulator of length n. If accumulating
// rows for a CSR matrix then n should be equal to the number of columns
// in the resulting matrix.
func NewSPA(n int) *SPA {
return &SPA{
w: make([]int, n),
y: make([]float64, n),
}
}
// ScatterVec accumulates the sparse vector x by multiplying the elements
// by alpha and adding them to the corresponding elements in the SPA
// (SPA += alpha * x)
func (s *SPA) ScatterVec(x *Vector, alpha float64, ind *[]int) {
s.Scatter(x.data, x.ind, alpha, ind)
}
// Scatter accumulates the sparse vector x by multiplying the elements by
// alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
func (s *SPA) Scatter(x []float64, indx []int, alpha float64, ind *[]int) {
for i, index := range indx {
s.ScatterValue(x[i], index, alpha, ind)
}
}
// ScatterValue accumulates a single value by multiplying the value by alpha
// and adding it to the corresponding element in the SPA (SPA += alpha * x)
func (s *SPA) ScatterValue(val float64, index int, alpha float64, ind *[]int) {
if s.w[index] < s.generation+1 {
s.w[index] = s.generation + 1
*ind = append(*ind, index)
s.y[index] = alpha * val
} else {
s.y[index] += alpha * val
}
}
// AccumulateDense accumulates the dense vector x by multiplying the non-zero elements
// by alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
// This is the dense version of the Scatter method for sparse vectors.
func (s *SPA) AccumulateDense(x []float64, alpha float64, ind *[]int) {
for i, val := range x {
if val != 0 {
s.ScatterValue(val, i, alpha, ind)
}
}
}
// Gather gathers the non-zero values from the SPA and appends them to
// end of the supplied sparse vector.
func (s SPA) Gather(data *[]float64, ind *[]int) {
for _, index := range (*ind)[s.nnz:] {
*data = append(*data, s.y[index])
//y[index] = 0
}
}
// GatherAndZero gathers the non-zero values from the SPA and appends them
// to the end of the supplied sparse vector. The SPA is also zeroed
// ready to start accumulating the next row/column vector.
func (s *SPA) GatherAndZero(data *[]float64, ind *[]int) {
s.Gather(data, ind)
s.nnz = len(*ind)
s.generation++
}
| {
defer restore()
c = m
} | conditional_block |
compressed_arith.go | package sparse
import (
"github.com/james-bowman/sparse/blas"
"gonum.org/v1/gonum/mat"
)
// MulMatRawVec computes the matrix vector product between lhs and rhs and stores
// the result in out
func MulMatRawVec(lhs *CSR, rhs []float64, out []float64) {
m, n := lhs.Dims()
if len(rhs) != n {
panic(mat.ErrShape)
}
if len(out) != m {
panic(mat.ErrShape)
}
blas.Dusmv(false, 1, lhs.RawMatrix(), rhs, 1, out, 1)
}
// temporaryWorkspace returns a new CSR matrix w with the size of r x c with
// initial capacity allocated for nnz non-zero elements and
// returns a callback to defer which performs cleanup at the return of the call.
// This should be used when a method receiver is the same pointer as an input argument.
func (c *CSR) temporaryWorkspace(row, col, nnz int, clear bool) (w *CSR, restore func()) |
// spalloc ensures appropriate storage is allocated for the receiver sparse matrix
// ensuring it is row * col dimensions and checking for any overlap or aliasing
// between operands a or b with c in which case a temporary isolated workspace is
// allocated and the returned value isTemp is true with restore representing a
// function to clean up and restore the workspace once finished.
func (c *CSR) spalloc(a mat.Matrix, b mat.Matrix) (m *CSR, isTemp bool, restore func()) {
var nnz int
m = c
row, _ := a.Dims()
_, col := b.Dims()
lSp, lIsSp := a.(Sparser)
rSp, rIsSp := b.(Sparser)
if lIsSp && rIsSp {
nnz = lSp.NNZ() + rSp.NNZ()
} else {
// assume 10% of elements will be non-zero
nnz = row * col / 10
}
if c.checkOverlap(a) || c.checkOverlap(b) {
if !c.IsZero() && (row != c.matrix.I || col != c.matrix.J) {
panic(mat.ErrShape)
}
m, restore = c.temporaryWorkspace(row, col, nnz, true)
isTemp = true
} else {
c.reuseAs(row, col, nnz, true)
}
return
}
// Mul takes the matrix product of the supplied matrices a and b and stores the result
// in the receiver. Some specific optimisations are available for operands of certain
// sparse formats e.g. CSR * CSR uses Gustavson Algorithm (ACM 1978) for fast
// sparse matrix multiplication.
// If the number of columns in a does not equal the number of rows in b, Mul will panic.
func (c *CSR) Mul(a, b mat.Matrix) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ac != br {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lhs, isLCsr := a.(*CSR)
rhs, isRCsr := b.(*CSR)
if isLCsr && isRCsr {
// handle CSR * CSR
c.mulCSRCSR(lhs, rhs)
return
}
if dia, ok := a.(*DIA); ok {
if isRCsr {
// handle DIA * CSR
c.mulDIACSR(dia, rhs, false)
return
}
// handle DIA * mat.Matrix
c.mulDIAMat(dia, b, false)
return
}
if dia, ok := b.(*DIA); ok {
if isLCsr {
// handle CSR * DIA
c.mulDIACSR(dia, lhs, true)
return
}
// handle mat.Matrix * DIA
c.mulDIAMat(dia, a, true)
return
}
// TODO: handle cases where both matrices are DIA
srcA, isLSparse := a.(TypeConverter)
srcB, isRSparse := b.(TypeConverter)
if isLSparse {
if isRSparse {
// handle Sparser * Sparser
c.mulCSRCSR(srcA.ToCSR(), srcB.ToCSR())
return
}
// handle Sparser * mat.Matrix
c.mulCSRMat(srcA.ToCSR(), b)
return
}
if isRSparse {
// handle mat.Matrix * Sparser
w := getWorkspace(bc, ar, bc*ar/10, true)
bt := srcB.ToCSC().T().(*CSR)
w.mulCSRMat(bt, a.T())
c.Clone(w.T())
putWorkspace(w)
return
}
// handle mat.Matrix * mat.Matrix
row := getFloats(ac, false)
defer putFloats(row)
var v float64
for i := 0; i < ar; i++ {
for ci := range row {
row[ci] = a.At(i, ci)
}
for j := 0; j < bc; j++ {
v = 0
for ci, e := range row {
if e != 0 {
v += e * b.At(ci, j)
}
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRCSR handles CSR = CSR * CSR using Gustavson Algorithm (ACM 1978)
func (c *CSR) mulCSRCSR(lhs *CSR, rhs *CSR) {
ar, _ := lhs.Dims()
_, bc := rhs.Dims()
spa := NewSPA(bc)
// rows in C
for i := 0; i < ar; i++ {
// each element t in row i of A
for t := lhs.matrix.Indptr[i]; t < lhs.matrix.Indptr[i+1]; t++ {
begin := rhs.matrix.Indptr[lhs.matrix.Ind[t]]
end := rhs.matrix.Indptr[lhs.matrix.Ind[t]+1]
spa.Scatter(rhs.matrix.Data[begin:end], rhs.matrix.Ind[begin:end], lhs.matrix.Data[t], &c.matrix.Ind)
}
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
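// Editor's note (not part of the original source): Gustavson's algorithm forms
// C one row at a time. Row i of C is the sum, over the non-zeros a(i,t), of
// a(i,t) * row t of B, which is exactly what the Scatter calls above accumulate
// into the SPA before GatherAndZero flushes the finished row. A dense-space
// equivalent of that inner accumulation, for reference only:
func gustavsonRowDense(aRow []float64, b [][]float64, out []float64) {
for t, av := range aRow {
if av == 0 {
continue // only non-zero a(i,t) contribute, as in the sparse loop above
}
for j, bv := range b[t] {
out[j] += av * bv
}
}
}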
// mulCSRMat handles CSR = CSR * mat.Matrix
func (c *CSR) mulCSRMat(lhs *CSR, b mat.Matrix) {
ar, _ := lhs.Dims()
_, bc := b.Dims()
// handle case where matrix A is CSR (matrix B can be any implementation of mat.Matrix)
for i := 0; i < ar; i++ {
for j := 0; j < bc; j++ {
var v float64
// TODO Consider converting all Sparser args to CSR
for k := lhs.matrix.Indptr[i]; k < lhs.matrix.Indptr[i+1]; k++ {
v += lhs.matrix.Data[k] * b.At(lhs.matrix.Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if i < len(diagonal) {
v = other.At(i, k) * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
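// Editor's note (not part of the original source): a small worked example of
// the two DIA cases above. With D = diag(2, 3) and M = [[1 2] [3 4]]:
// D * M scales rows:    [[2 4] [9 12]]  (trans == false, diagonal[i] applied to row i)
// M * D scales columns: [[2 6] [6 12]]  (trans == true, diagonal[k] applied to column k)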
// Sub subtracts matrix b from a and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Sub(a, b mat.Matrix) {
c.addScaled(a, b, 1, -1)
}
// Add adds matrices a and b together and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Add(a, b mat.Matrix) {
c.addScaled(a, b, 1, 1)
}
// addScaled adds matrices a and b, scaling them by alpha and beta respectively beforehand.
func (c *CSR) addScaled(a mat.Matrix, b mat.Matrix, alpha float64, beta float64) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ar != br || ac != bc {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lCsr, lIsCsr := a.(*CSR)
rCsr, rIsCsr := b.(*CSR)
// TODO optimisation for DIA matrices
if lIsCsr && rIsCsr {
c.addCSRCSR(lCsr, rCsr, alpha, beta)
return
}
if lIsCsr {
c.addCSR(lCsr, b, alpha, beta)
return
}
if rIsCsr {
c.addCSR(rCsr, a, beta, alpha)
return
}
// dumb addition with no sparsity optimisations/savings
for i := 0; i < ar; i++ {
for j := 0; j < ac; j++ {
v := alpha*a.At(i, j) + beta*b.At(i, j)
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// addCSR adds a CSR matrix to any implementation of mat.Matrix and stores the
// result in the receiver.
func (c *CSR) addCSR(csr *CSR, other mat.Matrix, alpha float64, beta float64) {
ar, ac := csr.Dims()
spa := NewSPA(ac)
a := csr.RawMatrix()
if dense, isDense := other.(mat.RawMatrixer); isDense {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
rawOther := dense.RawMatrix()
r := rawOther.Data[i*rawOther.Stride : i*rawOther.Stride+rawOther.Cols]
spa.AccumulateDense(r, beta, &c.matrix.Ind)
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
for j := 0; j < ac; j++ {
v := other.At(i, j)
if v != 0 {
spa.ScatterValue(v, j, beta, &c.matrix.Ind)
}
}
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// addCSRCSR adds 2 CSR matrices together storing the result in the receiver.
// Matrices a and b are scaled by alpha and beta respectively before addition.
// This method is specially optimised to take advantage of the sparsity patterns
// of the 2 CSR matrices.
func (c *CSR) addCSRCSR(lhs *CSR, rhs *CSR, alpha float64, beta float64) {
ar, ac := lhs.Dims()
a := lhs.RawMatrix()
b := rhs.RawMatrix()
spa := NewSPA(ac)
var begin, end int
for i := 0; i < ar; i++ {
begin, end = a.Indptr[i], a.Indptr[i+1]
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
begin, end = b.Indptr[i], b.Indptr[i+1]
spa.Scatter(b.Data[begin:end], b.Ind[begin:end], beta, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// SPA is a SParse Accumulator used to construct the results of sparse
// arithmetic operations in linear time.
type SPA struct {
// w contains flags for indices containing non-zero values
w []int
// y contains all the values in dense representation (including zero values)
y []float64
// nnz is the Number of Non-Zero elements
nnz int
// generation is used to compare values of w to see if they have been set
// in the current row (generation). This avoids needing to reset all values
// during the GatherAndZero operation at the end of
// construction for each row/column vector.
generation int
}
// NewSPA creates a new SParse Accumulator of length n. If accumulating
// rows for a CSR matrix then n should be equal to the number of columns
// in the resulting matrix.
func NewSPA(n int) *SPA {
return &SPA{
w: make([]int, n),
y: make([]float64, n),
}
}
// ScatterVec accumulates the sparse vector x by multiplying the elements
// by alpha and adding them to the corresponding elements in the SPA
// (SPA += alpha * x)
func (s *SPA) ScatterVec(x *Vector, alpha float64, ind *[]int) {
s.Scatter(x.data, x.ind, alpha, ind)
}
// Scatter accumulates the sparse vector x by multiplying the elements by
// alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
func (s *SPA) Scatter(x []float64, indx []int, alpha float64, ind *[]int) {
for i, index := range indx {
s.ScatterValue(x[i], index, alpha, ind)
}
}
// ScatterValue accumulates a single value by multiplying the value by alpha
// and adding it to the corresponding element in the SPA (SPA += alpha * x)
func (s *SPA) ScatterValue(val float64, index int, alpha float64, ind *[]int) {
if s.w[index] < s.generation+1 {
s.w[index] = s.generation + 1
*ind = append(*ind, index)
s.y[index] = alpha * val
} else {
s.y[index] += alpha * val
}
}
// AccumulateDense accumulates the dense vector x by multiplying the non-zero elements
// by alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
// This is the dense version of the Scatter method for sparse vectors.
func (s *SPA) AccumulateDense(x []float64, alpha float64, ind *[]int) {
for i, val := range x {
if val != 0 {
s.ScatterValue(val, i, alpha, ind)
}
}
}
// Gather gathers the non-zero values from the SPA and appends them to
// end of the supplied sparse vector.
func (s SPA) Gather(data *[]float64, ind *[]int) {
for _, index := range (*ind)[s.nnz:] {
*data = append(*data, s.y[index])
//y[index] = 0
}
}
// GatherAndZero gathers the non-zero values from the SPA and appends them
// to the end of the supplied sparse vector. The SPA is also zeroed
// ready to start accumulating the next row/column vector.
func (s *SPA) GatherAndZero(data *[]float64, ind *[]int) {
s.Gather(data, ind)
s.nnz = len(*ind)
s.generation++
}
| {
w = getWorkspace(row, col, nnz, clear)
return w, func() {
c.cloneCSR(w)
putWorkspace(w)
}
} | identifier_body |
compressed_arith.go | package sparse
import (
"github.com/james-bowman/sparse/blas"
"gonum.org/v1/gonum/mat"
)
// MulMatRawVec computes the matrix vector product between lhs and rhs and stores
// the result in out
func MulMatRawVec(lhs *CSR, rhs []float64, out []float64) {
m, n := lhs.Dims()
if len(rhs) != n {
panic(mat.ErrShape)
}
if len(out) != m {
panic(mat.ErrShape)
}
blas.Dusmv(false, 1, lhs.RawMatrix(), rhs, 1, out, 1)
}
// temporaryWorkspace returns a new CSR matrix w with the size of r x c with
// initial capacity allocated for nnz non-zero elements and
// returns a callback to defer which performs cleanup at the return of the call.
// This should be used when a method receiver is the same pointer as an input argument.
func (c *CSR) temporaryWorkspace(row, col, nnz int, clear bool) (w *CSR, restore func()) {
w = getWorkspace(row, col, nnz, clear)
return w, func() {
c.cloneCSR(w)
putWorkspace(w)
}
}
// spalloc ensures appropriate storage is allocated for the receiver sparse matrix
// ensuring it is row * col dimensions and checking for any overlap or aliasing
// between operands a or b with c in which case a temporary isolated workspace is
// allocated and the returned value isTemp is true with restore representing a
// function to clean up and restore the workspace once finished.
func (c *CSR) spalloc(a mat.Matrix, b mat.Matrix) (m *CSR, isTemp bool, restore func()) {
var nnz int
m = c
row, _ := a.Dims()
_, col := b.Dims()
lSp, lIsSp := a.(Sparser)
rSp, rIsSp := b.(Sparser)
if lIsSp && rIsSp {
nnz = lSp.NNZ() + rSp.NNZ()
} else {
// assume 10% of elements will be non-zero
nnz = row * col / 10
}
if c.checkOverlap(a) || c.checkOverlap(b) {
if !c.IsZero() && (row != c.matrix.I || col != c.matrix.J) {
panic(mat.ErrShape)
}
m, restore = c.temporaryWorkspace(row, col, nnz, true)
isTemp = true
} else {
c.reuseAs(row, col, nnz, true)
}
return
}
// Mul takes the matrix product of the supplied matrices a and b and stores the result
// in the receiver. Some specific optimisations are available for operands of certain
// sparse formats e.g. CSR * CSR uses Gustavson Algorithm (ACM 1978) for fast
// sparse matrix multiplication.
// If the number of columns in a does not equal the number of rows in b, Mul will panic.
func (c *CSR) Mul(a, b mat.Matrix) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ac != br {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lhs, isLCsr := a.(*CSR)
rhs, isRCsr := b.(*CSR)
if isLCsr && isRCsr {
// handle CSR * CSR
c.mulCSRCSR(lhs, rhs)
return
}
if dia, ok := a.(*DIA); ok {
if isRCsr {
// handle DIA * CSR
c.mulDIACSR(dia, rhs, false)
return
}
// handle DIA * mat.Matrix
c.mulDIAMat(dia, b, false)
return
}
if dia, ok := b.(*DIA); ok {
if isLCsr {
// handle CSR * DIA
c.mulDIACSR(dia, lhs, true)
return
}
// handle mat.Matrix * DIA
c.mulDIAMat(dia, a, true)
return
}
// TODO: handle cases where both matrices are DIA
srcA, isLSparse := a.(TypeConverter)
srcB, isRSparse := b.(TypeConverter)
if isLSparse {
if isRSparse {
// handle Sparser * Sparser
c.mulCSRCSR(srcA.ToCSR(), srcB.ToCSR())
return
}
// handle Sparser * mat.Matrix
c.mulCSRMat(srcA.ToCSR(), b)
return
}
if isRSparse {
// handle mat.Matrix * Sparser
w := getWorkspace(bc, ar, bc*ar/10, true)
bt := srcB.ToCSC().T().(*CSR)
w.mulCSRMat(bt, a.T())
c.Clone(w.T())
putWorkspace(w)
return
}
// handle mat.Matrix * mat.Matrix
row := getFloats(ac, false)
defer putFloats(row)
var v float64
for i := 0; i < ar; i++ {
for ci := range row {
row[ci] = a.At(i, ci)
}
for j := 0; j < bc; j++ {
v = 0
for ci, e := range row {
if e != 0 {
v += e * b.At(ci, j)
}
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRCSR handles CSR = CSR * CSR using Gustavson Algorithm (ACM 1978)
func (c *CSR) mulCSRCSR(lhs *CSR, rhs *CSR) {
ar, _ := lhs.Dims()
_, bc := rhs.Dims() | for i := 0; i < ar; i++ {
// each element t in row i of A
for t := lhs.matrix.Indptr[i]; t < lhs.matrix.Indptr[i+1]; t++ {
begin := rhs.matrix.Indptr[lhs.matrix.Ind[t]]
end := rhs.matrix.Indptr[lhs.matrix.Ind[t]+1]
spa.Scatter(rhs.matrix.Data[begin:end], rhs.matrix.Ind[begin:end], lhs.matrix.Data[t], &c.matrix.Ind)
}
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRMat handles CSR = CSR * mat.Matrix
func (c *CSR) mulCSRMat(lhs *CSR, b mat.Matrix) {
ar, _ := lhs.Dims()
_, bc := b.Dims()
// handle case where matrix A is CSR (matrix B can be any implementation of mat.Matrix)
for i := 0; i < ar; i++ {
for j := 0; j < bc; j++ {
var v float64
// TODO Consider converting all Sparser args to CSR
for k := lhs.matrix.Indptr[i]; k < lhs.matrix.Indptr[i+1]; k++ {
v += lhs.matrix.Data[k] * b.At(lhs.matrix.Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if i < len(diagonal) {
v = other.At(i, k) * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// Sub subtracts matrix b from a and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Sub(a, b mat.Matrix) {
c.addScaled(a, b, 1, -1)
}
// Add adds matrices a and b together and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Add(a, b mat.Matrix) {
c.addScaled(a, b, 1, 1)
}
// addScaled adds matrices a and b, scaling them by alpha and beta respectively beforehand.
func (c *CSR) addScaled(a mat.Matrix, b mat.Matrix, alpha float64, beta float64) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ar != br || ac != bc {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lCsr, lIsCsr := a.(*CSR)
rCsr, rIsCsr := b.(*CSR)
// TODO optimisation for DIA matrices
if lIsCsr && rIsCsr {
c.addCSRCSR(lCsr, rCsr, alpha, beta)
return
}
if lIsCsr {
c.addCSR(lCsr, b, alpha, beta)
return
}
if rIsCsr {
c.addCSR(rCsr, a, beta, alpha)
return
}
// dumb addition with no sparsity optimisations/savings
for i := 0; i < ar; i++ {
for j := 0; j < ac; j++ {
v := alpha*a.At(i, j) + beta*b.At(i, j)
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// addCSR adds a CSR matrix to any implementation of mat.Matrix and stores the
// result in the receiver.
func (c *CSR) addCSR(csr *CSR, other mat.Matrix, alpha float64, beta float64) {
ar, ac := csr.Dims()
spa := NewSPA(ac)
a := csr.RawMatrix()
if dense, isDense := other.(mat.RawMatrixer); isDense {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
rawOther := dense.RawMatrix()
r := rawOther.Data[i*rawOther.Stride : i*rawOther.Stride+rawOther.Cols]
spa.AccumulateDense(r, beta, &c.matrix.Ind)
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
for j := 0; j < ac; j++ {
v := other.At(i, j)
if v != 0 {
spa.ScatterValue(v, j, beta, &c.matrix.Ind)
}
}
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// addCSRCSR adds 2 CSR matrices together storing the result in the receiver.
// Matrices a and b are scaled by alpha and beta respectively before addition.
// This method is specially optimised to take advantage of the sparsity patterns
// of the 2 CSR matrices.
func (c *CSR) addCSRCSR(lhs *CSR, rhs *CSR, alpha float64, beta float64) {
ar, ac := lhs.Dims()
a := lhs.RawMatrix()
b := rhs.RawMatrix()
spa := NewSPA(ac)
var begin, end int
for i := 0; i < ar; i++ {
begin, end = a.Indptr[i], a.Indptr[i+1]
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
begin, end = b.Indptr[i], b.Indptr[i+1]
spa.Scatter(b.Data[begin:end], b.Ind[begin:end], beta, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// SPA is a SParse Accumulator used to construct the results of sparse
// arithmetic operations in linear time.
type SPA struct {
// w contains flags for indices containing non-zero values
w []int
// y contains all the values in dense representation (including zero values)
y []float64
// nnz is the Number of Non-Zero elements
nnz int
// generation is used to compare values of w to see if they have been set
// in the current row (generation). This avoids needing to reset all values
// during the GatherAndZero operation at the end of
// construction for each row/column vector.
generation int
}
// NewSPA creates a new SParse Accumulator of length n. If accumulating
// rows for a CSR matrix then n should be equal to the number of columns
// in the resulting matrix.
func NewSPA(n int) *SPA {
return &SPA{
w: make([]int, n),
y: make([]float64, n),
}
}
// ScatterVec accumulates the sparse vector x by multiplying the elements
// by alpha and adding them to the corresponding elements in the SPA
// (SPA += alpha * x)
func (s *SPA) ScatterVec(x *Vector, alpha float64, ind *[]int) {
s.Scatter(x.data, x.ind, alpha, ind)
}
// Scatter accumulates the sparse vector x by multiplying the elements by
// alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
func (s *SPA) Scatter(x []float64, indx []int, alpha float64, ind *[]int) {
for i, index := range indx {
s.ScatterValue(x[i], index, alpha, ind)
}
}
// ScatterValue accumulates a single value by multiplying the value by alpha
// and adding it to the corresponding element in the SPA (SPA += alpha * x)
func (s *SPA) ScatterValue(val float64, index int, alpha float64, ind *[]int) {
if s.w[index] < s.generation+1 {
s.w[index] = s.generation + 1
*ind = append(*ind, index)
s.y[index] = alpha * val
} else {
s.y[index] += alpha * val
}
}
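// Editor's note (not part of the original source): a short trace of the
// generation trick in ScatterValue above. With generation == 0, the first write
// to index j sets w[j] = 1 and overwrites y[j]; later writes in the same row
// see w[j] == generation+1 and accumulate instead. GatherAndZero then
// increments generation, so the next row starts fresh without clearing w or y.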
// AccumulateDense accumulates the dense vector x by multiplying the non-zero elements
// by alpha and adding them to the corresponding elements in the SPA (SPA += alpha * x)
// This is the dense version of the Scatter method for sparse vectors.
func (s *SPA) AccumulateDense(x []float64, alpha float64, ind *[]int) {
for i, val := range x {
if val != 0 {
s.ScatterValue(val, i, alpha, ind)
}
}
}
// Gather gathers the non-zero values from the SPA and appends them to
// end of the supplied sparse vector.
func (s SPA) Gather(data *[]float64, ind *[]int) {
for _, index := range (*ind)[s.nnz:] {
*data = append(*data, s.y[index])
//y[index] = 0
}
}
// GatherAndZero gathers the non-zero values from the SPA and appends them
// to the end of the supplied sparse vector. The SPA is also zeroed
// ready to start accumulating the next row/column vector.
func (s *SPA) GatherAndZero(data *[]float64, ind *[]int) {
s.Gather(data, ind)
s.nnz = len(*ind)
s.generation++
} | spa := NewSPA(bc)
// rows in C | random_line_split |
IP6_Address.go | // SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
//
// This software is provided under under a slightly modified version
// of the Apache Software License. See the accompanying LICENSE file
// for more information.
//
import array
from six import string_types
type IP6_Address: struct {
ADDRESS_BYTE_SIZE = 16
//A Hex Group is a 16-bit unit of the address
TOTAL_HEX_GROUPS = 8
HEX_GROUP_SIZE = 4 //Size in characters
TOTAL_SEPARATORS = TOTAL_HEX_GROUPS - 1
ADDRESS_TEXT_SIZE = (TOTAL_HEX_GROUPS * HEX_GROUP_SIZE) + TOTAL_SEPARATORS
SEPARATOR = ":"
SCOPE_SEPARATOR = "%"
//############################################################################################################
// Constructor and construction helpers
func (self TYPE) __init__(address interface{}){
//The internal representation of an IP6 address is a 16-byte array
self.__bytes = array.array('B', b'\0' * self.ADDRESS_BYTE_SIZE)
self.__scope_id = ""
//Invoke a constructor based on the type of the argument
if isinstance(address, string_types) {
self.__from_string(address)
} else {
self.__from_bytes(address)
func (self TYPE) __from_string(address interface{}){
//Separate the Scope ID, if present
if self.__is_a_scoped_address(address) {
split_parts = address.split(self.SCOPE_SEPARATOR)
address = split_parts[0]
if split_parts[1] == "" {
raise Exception("Empty scope ID")
self.__scope_id = split_parts[1]
//Expand address if it's in compressed form
if self.__is_address_in_compressed_form(address) {
address = self.__expand_compressed_address(address)
//Insert leading zeroes where needed
address = self.__insert_leading_zeroes(address)
//Sanity check
if len(address) != self.ADDRESS_TEXT_SIZE {
raise Exception('IP6_Address - from_string - address size != ' + str(self.ADDRESS_TEXT_SIZE))
//Split address into hex groups
hex_groups = address.split(self.SEPARATOR)
if len(hex_groups) != self.TOTAL_HEX_GROUPS {
raise Exception('IP6_Address - parsed hex groups != ' + str(self.TOTAL_HEX_GROUPS))
//For each hex group, convert it into integer words
offset = 0
for group in hex_groups:
if len(group) != self.HEX_GROUP_SIZE {
raise Exception('IP6_Address - parsed hex group length != ' + str(self.HEX_GROUP_SIZE))
group_as_int = int(group, 16)
self.__bytes[offset] = (group_as_int & 0xFF00) >> 8
self.__bytes[offset + 1] = (group_as_int & 0x00FF)
offset += 2
func (self TYPE) __from_bytes(theBytes interface{}){
if len(theBytes) != self.ADDRESS_BYTE_SIZE {
raise Exception ("IP6_Address - from_bytes - array size != " + str(self.ADDRESS_BYTE_SIZE))
self.__bytes = theBytes
//############################################################################################################
// Projectors
func (self TYPE) as_string(compress_address = true, scoped_address = true interface{}){
s = ""
for i, v in enumerate(self.__bytes):
s += hex(v)[2:].rjust(2, '0')
if i % 2 == 1 {
s += self.SEPARATOR
s = s[:-1].upper()
if compress_address {
s = self.__trim_leading_zeroes(s)
s = self.__trim_longest_zero_chain(s)
if scoped_address and self.get_scope_id() != "" {
s += self.SCOPE_SEPARATOR + self.__scope_id
return s
func (self TYPE) as_bytes(){
return self.__bytes
func (self TYPE) __str__(){
return self.as_string()
func (self TYPE) get_scope_id(){
return self.__scope_id
func (self TYPE) get_unscoped_address(){
return self.as_string(true, false) //Compressed address = true, Scoped address = false
//############################################################################################################
// Semantic helpers
func (self TYPE) is_multicast(){
return self.__bytes[0] == 0xFF
func (self TYPE) is_unicast(){
return self.__bytes[0] == 0xFE
func (self TYPE) is_link_local_unicast(){
return self.is_unicast() and (self.__bytes[1] & 0xC0 == 0x80)
func (self TYPE) is_site_local_unicast(){
return self.is_unicast() and (self.__bytes[1] & 0xC0 == 0xC0)
func (self TYPE) is_unique_local_unicast(){
return self.__bytes[0] == 0xFD
func (self TYPE) get_human_readable_address_type(){
if self.is_multicast() {
return "multicast"
elif self.is_unicast() {
if self.is_link_local_unicast() {
return "link-local unicast"
elif self.is_site_local_unicast() {
return "site-local unicast"
} else {
return "unicast"
elif self.is_unique_local_unicast() {
return "unique-local unicast"
} else {
return "unknown type"
//############################################################################################################
//Expansion helpers
//Predicate - returns whether an address is in compressed form
func (self TYPE) __is_address_in_compressed_form(address interface{}){
//Sanity check - triple colon detection (not detected by searches of double colon)
if address.count(self.SEPARATOR * 3) > 0 {
raise Exception("IP6_Address - found triple colon")
//Count the double colon marker
compression_marker_count = self.__count_compression_marker(address)
if compression_marker_count == 0 {
return false
elif compression_marker_count == 1 {
return true
} else {
raise Exception("IP6_Address - more than one compression marker (\"::\") found")
//Returns how many hex groups are present, in a compressed address
func (self TYPE) __count_compressed_groups(address interface{}){
trimmed_address = address.replace(self.SEPARATOR * 2, self.SEPARATOR) //Replace "::" with ":"
return trimmed_address.count(self.SEPARATOR) + 1
//Counts how many compression markers are present
func (self TYPE) __count_compression_marker(address interface{}){
return address.count(self.SEPARATOR * 2) //Count occurrences of "::"
//Inserts leading zeroes in every hex group
func (self TYPE) __insert_leading_zeroes(address interface{}){
hex_groups = address.split(self.SEPARATOR)
new_address = ""
for hex_group in hex_groups:
if len(hex_group) < 4 {
hex_group = hex_group.rjust(4, "0")
new_address += hex_group + self.SEPARATOR
return new_address[:-1] //Trim the last colon
//Expands a compressed address
func (self TYPE) __expand_compressed_address(address interface{}){
group_count = self.__count_compressed_groups(address)
groups_to_insert = self.TOTAL_HEX_GROUPS - group_count
pos = address.find(self.SEPARATOR * 2) + 1
while groups_to_insert:
address = address[:pos] + "0000" + self.SEPARATOR + address[pos:]
pos += 5
groups_to_insert -= 1
//Replace the compression marker with a single colon
address = address.replace(self.SEPARATOR * 2, self.SEPARATOR)
return address
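//Editor's note (not part of the original source): worked example of the
//expansion above. "FE80::0202:B3FF:FE1E:8329" contains 5 hex groups, so 3
//zero groups are inserted at the "::" marker, giving
//"FE80:0000:0000:0000:0202:B3FF:FE1E:8329" once __insert_leading_zeroes has run.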
//############################################################################################################
//Compression helpers
func (self TYPE) __trim_longest_zero_chain(address interface{}){
chain_size = 8
while chain_size > 0:
groups = address.split(self.SEPARATOR)
for index, group in enumerate(groups):
//Find the first zero | if group == "0" {
start_index = index
end_index = index
//Find the end of this chain of zeroes
while end_index < 7 and groups[end_index + 1] == "0":
end_index += 1
//If the zero chain matches the current size, trim it
found_size = end_index - start_index + 1
if found_size == chain_size {
address = self.SEPARATOR.join(groups[0:start_index]) + self.SEPARATOR * 2 + self.SEPARATOR.join(groups[(end_index+1):])
return address
//No chain of this size found, try with a lower size
chain_size -= 1
return address
//Trims all leading zeroes from every hex group
func (self TYPE) __trim_leading_zeroes(theStr interface{}){
groups = theStr.split(self.SEPARATOR)
theStr = ""
for group in groups:
group = group.lstrip("0") + self.SEPARATOR
if group == self.SEPARATOR {
group = "0" + self.SEPARATOR
theStr += group
return theStr[:-1]
//############################################################################################################
@classmethod
func is_a_valid_text_representation(cls, text_representation interface{}){
try:
//Capitalize on the constructor's ability to detect invalid text representations of an IP6 address
IP6_Address(text_representation)
return true
except Exception:
return false
func (self TYPE) __is_a_scoped_address(text_representation interface{}){
return text_representation.count(self.SCOPE_SEPARATOR) == 1 | random_line_split |
|
BasePersonFactory.ts | import { Action, actionSaveData } from '../Action/ActionFactory';
import { MyGame } from '../../Tool/System/Game';
import { SelfHome } from '../Building/SelfHome';
import ProgressNotice from '../../UI/Prefab/ProgressNotice_script';
export interface PersonPos {
cityId: number;
buildingId: number;
}
export interface MapPos {
x: number;
y: number;
}
export class BasePerson {
//person name
name: string;
//attack
attack: number;
//defense
def: number;
//leadership
command: number;
//intelligence
intelligence: number;
//charm
charm: number;
//politics
politics: number;
//gender
sex: number;
//personal skill ids
presonSkillIdArr: number[];
//weapon equipment
equipAttack: number;
//defensive equipment
equipDef: number;
//jewelry
equipJewelry: number;
//mount
equipHorse: number;
//unique id
personId: number;
//position
personPos: PersonPos;
//home position
//just a city id
homePos: number;
//destination position for movement on the world map
goalCityMapPos: MapPos;
//current position on the world map
nowMapPos: MapPos;
//destination city of the world map movement
goalCityId: number;
//the person's current item data
//item id -> item count
itemObj: { [itemId: number]: number };
//money
money: number;
//stamina
power: number;
//whether the person is in battle
//for now battle state details are not recorded
inInBattle: boolean;
//own home
home: SelfHome;
//whether this is the player character
isUserRole: boolean;
//ids of the actions currently being executed
nowActionIds: number[];
//actions currently being executed
nowActions: Action[];
//saved progress of the actions being executed
nowActionData: { [actionId: number]: actionSaveData };
//bound progress bar
travelProgressNotice: ProgressNotice;
//previous city
lastCityId: number;
constructor() {
}
/**
* Change the person's position on the world map
*/
changeMapPos(person: BasePerson, addMinutes: number) {
if (!person.goalCityMapPos) {
return;
}
if (!MyGame.GameTool.judgeEqualPos(person.nowMapPos, person.goalCityMapPos)) {
//has not reached the destination yet
if (MyGame.MapRandomEvent.judgeMapRandomEvent(person)) {
return;
}
//distance to move this tick
let moveNum = addMinutes * MyGame.MAP_MOVE_SPEED_MINUTE;
//skip trigonometric functions here for now to reduce computation
let disX = Math.abs(person.goalCityMapPos.x - person.nowMapPos.x);
let disY = Math.abs(person.goalCityMapPos.y - person.nowMapPos.y);
let dis = Math.sqrt(disX * disX + disY * disY);
let addX = disX / dis * moveNum;
let addY = disY / dis * moveNum;
//update stamina
this.changePowerNum(-1 * MyGame.MAP_MOVE_COST_POWER_MINUTE * addMinutes);
//advance along the x axis
if (person.goalCityMapPos.x !== person.nowMapPos.x) {
if (person.goalCityMapPos.x > person.nowMapPos.x) {
person.nowMapPos.x = person.nowMapPos.x + addX;
if (person.nowMapPos.x >= person.goalCityMapPos.x) {
person.nowMapPos.x = person.goalCityMapPos.x;
}
} else {
person.nowMapPos.x = person.nowMapPos.x - addX;
if (person.nowMapPos.x <= person.goalCityMapPos.x) {
person.nowMapPos.x = person.goalCityMapPos.x;
}
}
}
//advance along the y axis
if (person.goalCityMapPos.y !== person.nowMapPos.y) {
if (person.goalCityMapPos.y > person.nowMapPos.y) {
person.nowMapPos.y = person.nowMapPos.y + addY;
if (person.nowMapPos.y >= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
} else {
person.nowMapPos.y = person.nowMapPos.y - addY;
if (person.nowMapPos.y <= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
}
}
//update the progress bar
if (this.travelProgressNotice) {
let lastCityData = MyGame.GameManager.gameDataSave.getCityById(this.lastCityId);
if (lastCityData) {
let disXTotal = Math.abs(person.goalCityMapPos.x - lastCityData.cityPos.x);
let disYTotal = Math.abs(person.goalCityMapPos.y - lastCityData.cityPos.y);
let disTotal = Math.sqrt(disXTotal * disXTotal + disYTotal * disYTotal);
this.travelProgressNotice.updateProgressNum(1 - (dis / disTotal));
}
}
if (MyGame.GameTool.judgeEqualPos(person.nowMapPos, person.goalCityMapPos)) {
person.personPos.cityId = person.goalCityId;
person.nowMapPos = person.goalCityMapPos;
person.goalCityMapPos = undefined;
person.goalCityId = undefined;
if (this.mapMoveFinishCb) {
this.mapMoveFinishCb();
if (this.isUserRole) {
MyGame.GameManager.gameSpeedResetting();
}
}
if (this.travelProgressNotice) {
this.travelProgressNotice.hide(false);
}
}
}
}
/**
* Travel to a city
* @param cityId
*/
goToCity(cityId: number) {
if (this.inInBattle) {
return;
}
if (cityId === this.personPos.cityId) {
return;
}
this.goalCityMapPos = MyGame.GameManager.gameDataSave.getCityById(cityId).cityPos;
if (MyGame.GameTool.judgeEqualPos(this.nowMapPos, this.goalCityMapPos)) {
//already at the destination, just correct the stored city
this.personPos.cityId = cityId;
return;
}
this.goalCityId = cityId;
//if the person already has world-map coordinates, start from them; otherwise start from the current city's coordinates
if (this.personPos.cityId !== MyGame.USER_IN_FIELD) {
let cityPos = MyGame.GameManager.gameDataSave.getCityById(this.personPos.cityId).cityPos;
this.nowMapPos = MyGame.GameTool.createMapPos(cityPos.x, cityPos.y);
}
this.lastCityId = this.personPos.cityId;
//leave the city immediately
this.personPos.cityId = MyGame.USER_IN_FIELD;
}
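//Design note (added for clarity): while the person is travelling, personPos.cityId is set to
//MyGame.USER_IN_FIELD, so "being on the road" is encoded in the same field that normally holds
//the id of the city the person is in.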
//Travel to a building
goToBuilding(buildingId: number) {
if (this.inInBattle) {
return;
}
if (buildingId === MyGame.SELF_HOUSE_ID) {
//own home
if (this.personPos.cityId === this.homePos) {
this.personPos.buildingId = buildingId;
return;
}
}
let nearCityData = MyGame.GameTool.getNearBuildingCity(buildingId, this.personPos.cityId, undefined, this);
if (nearCityData.cityId !== this.personPos.cityId) {
this.goToCity(nearCityData.cityId);
return;
}
//buildings inside the current city are reached immediately
this.personPos.buildingId = buildingId;
}
//Receive items
getItem(rewardArr: number[]) {
if (rewardArr.length === 0) {
return;
}
if (rewardArr.length % 2 !== 0) {
MyGame.LogTool.showLog(`invalid reward list ${rewardArr}`);
return;
}
let i;
for (i = 0; i < rewardArr.length; i++) {
let id = rewardArr[i];
let num = rewardArr[i + 1];
if (!this.itemObj[id]) {
this.itemObj[id] = 0;
}
this.itemObj[id] = this.itemObj[id] + num;
i++;
}
}
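//Illustrative example (the ids are made up): getItem([1001, 2, 1002, 5]) adds 2 of item 1001 and
//5 of item 1002; the array is read as flat [itemId, count, itemId, count, ...] pairs.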
//Update running actions
timeUpdateAction(addMinutes: number) {
this.nowActions.forEach(function (action: Action) {
action.timeUpdate(addMinutes, this);
}.bind(this));
}
//Called when the game time advances
timeUpdate(addMinutes: number) {
}
//Called when the date changes
dayUpdate() {
}
/**
* Remove an item
* @param itemId item id
* @param removeNum amount to remove
*/
removeItemByItemId(itemId: number, removeNum: number) {
if (this.itemObj[itemId]) {
this.itemObj[itemId] = this.itemObj[itemId] - removeNum;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`removeItemByItemId error ! removeNum is ${removeNum} , nowNum is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
}
//Get the data that should be persisted
getSaveData() {
return {
name: this.name,
attack: this.attack,
def: this.def,
command: this.command,
intelligence: this.intelligence,
charm: this.charm,
politics: this.politics,
sex: this.sex,
presonSkillIdArr: this.presonSkillIdArr,
equipAttack: this.equipAttack,
equipDef: this.equipDef,
equipJewelry: this.equipJewelry,
equipHorse: this.equipHorse,
personId: this.personId,
personPos: this.personPos,
homePos: this.homePos,
goalCityMapPos: this.goalCityMapPos,
nowMapPos: this.nowMapPos,
goalCityId: this.goalCityId,
itemObj: this.itemObj,
money: this.money,
power: this.power,
inInBattle: this.inInBattle,
nowActionIds: this.nowActionIds,
nowActionData: this.nowActionData,
lastCityId: this.lastCityId
}
}
//Death callback
/**
* @param personAttack the attacker who made the kill
*/
deadCb(personAttack: BasePerson) {
MyGame.LogTool.showLog(`${personAttack.name} killed ${this.name}`);
}
//Callback when a battle starts
startBattleCb() {
this.inInBattle = true;
}
//Callback when a battle ends
battleFinishCb() {
this.inInBattle = false;
}
//Callback when a random world-map event triggers
mapRandomEventCb() {
}
//Callback when world-map movement finishes
mapMoveFinishCb() {
}
//Callback when an action finishes
actionFinishCb() {
this.nowActions = this.nowActions.filter(function (action: Action) {
return !action.isFinish(); | inInHomePos(): boolean {
return this.personPos.cityId === this.homePos;
}
/**
* Get the total number of items carried
*/
getItemTotalNum(): number {
let totalNum = 0;
for (var key in this.itemObj) {
if (!this.itemObj.hasOwnProperty(key)) {
continue;
}
totalNum = totalNum + this.itemObj[key];
}
return totalNum;
}
/**
* Increase the amount of an item
*/
addItemNum(itemId: number, num: number) {
this.itemObj[itemId] = (this.itemObj[itemId] || 0) + num;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`addItemNum error ! now num is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
/**
* Set the amount of an item
*/
setItemNum(itemId: number, num: number) {
this.itemObj[itemId] = num;
}
/**
* Change the amount of money
* @param changeMoneyNum amount to add (negative values subtract)
*/
changeMoneyNum(changeMoneyNum: number) {
this.money = this.money + changeMoneyNum;
MyGame.LogTool.showLog(`money change num is ${changeMoneyNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* Directly set the current amount of money
* @param newMoneyNum
*/
setMoneyNum(newMoneyNum: number) {
this.money = newMoneyNum;
MyGame.LogTool.showLog(`money now num is ${newMoneyNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* Change the stamina value
* @param changePowerNum
*/
changePowerNum(changePowerNum: number) {
this.power = this.power + changePowerNum;
if (this.power > MyGame.MAX_POWER) {
this.power = MyGame.MAX_POWER;
}
if (this.power < 0) {
this.power = 0;
}
this.power = Math.floor(this.power * 100000) / 100000;
MyGame.LogTool.showLog(`power change num is ${changePowerNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* Directly set the current stamina value
* @param newPowerNum
*/
setPowerNum(newPowerNum: number) {
this.power = newPowerNum;
if (this.power > MyGame.MAX_POWER) {
this.power = MyGame.MAX_POWER;
}
if (this.power < 0) {
this.power = 0;
}
this.power = Math.floor(this.power * 100000) / 100000;
MyGame.LogTool.showLog(`power now num is ${newPowerNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* Set the city the person is currently in
*/
setPersonCityPos(cityId: number) {
this.personPos.cityId = cityId;
}
/**
* Add an action
*/
addOneAction(action: Action) {
this.nowActions.push(action);
action.start(this);
}
} | }.bind(this));
}
/**
* Check whether the person is in the city where their home is
*/ | random_line_split |
py2_whole_image_desc_server_ts.py | #!/usr/bin/env python
# Tensorflow
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.platform import gfile
from sensor_msgs.msg import Image
# OpenCV
import cv2
from cv_bridge import CvBridge, CvBridgeError
# Misc Python packages
import numpy as np
import os
import time
import sys
# ROS
import rospy
import math
# Pkg msg definitions
from tx2_whole_image_desc_server.srv import WholeImageDescriptorComputeTS, WholeImageDescriptorComputeTSResponse
from TerminalColors import bcolors
tcol = bcolors()
QUEUE_SIZE = 200
def imgmsg_to_cv2( msg ):
assert msg.encoding == "8UC3" or msg.encoding == "8UC1" or msg.encoding == "bgr8" or msg.encoding == "mono8", \
"Expecting the msg to have encoding as 8UC3 or 8UC1, received"+ str( msg.encoding )
if msg.encoding == "8UC3" or msg.encoding=='bgr8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, -1)
return X
if msg.encoding == "8UC1" or msg.encoding=='mono8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width)
return X
class ProtoBufferModelImageDescriptor:
"""
This class loads the network from a frozen TensorFlow protobuf (.pb) file, which
contains the graph structure as well as the trained weights.
In the argument `frozen_protobuf_file`
you need to specify the full path to the frozen graph file.
"""
def __init__(self, frozen_protobuf_file, im_rows=600, im_cols=960, im_chnls=3):
start_const = time.time()
# return
## Build net
# from keras.backend.tensorflow_backend import set_session
# tf.set_random_seed(42)
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.15
config.gpu_options.visible_device_list = "0"
config.intra_op_parallelism_threads=1
config.gpu_options.allow_growth=True
tf.keras.backend.set_session(tf.Session(config=config))
tf.keras.backend.set_learning_phase(0)
self.sess = tf.keras.backend.get_session()
self.queue = []
self.im_rows = int(im_rows)
self.im_cols = int(im_cols)
self.im_chnls = int(im_chnls)
LOG_DIR = '/'.join( frozen_protobuf_file.split('/')[0:-1] )
print( '+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
print( '++++++++++ (HDF5ModelImageDescriptor) LOG_DIR=', LOG_DIR )
print( '++++++++++ im_rows=', im_rows, ' im_cols=', im_cols, ' im_chnls=', im_chnls )
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
model_type = LOG_DIR.split('/')[-1]
self.model_type = model_type
assert os.path.isdir( LOG_DIR ), "The LOG_DIR does not exist, or there is a permission issue. LOG_DIR="+LOG_DIR
assert os.path.isfile( frozen_protobuf_file ), 'The model weights file does not exist or there is a permission issue. '+"frozen_protobuf_file="+frozen_protobuf_file
#---
# Load .pb (protobuf file)
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# f = gfile.FastGFile(frozen_protobuf_file, 'rb')
f = tf.gfile.GFile( frozen_protobuf_file, 'rb')
graph_def = tf.GraphDef()
# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
#---
# Setup computation graph
print( 'Setup computational graph')
start_t = time.time()
sess = K.get_session()
sess.graph.as_default()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
print( 'Setup computational graph done in %4.2f sec ' %(time.time() - start_t ) )
#--
# Output Tensor.
# Note: the :0. Without :0 it will mean the operator, which is not what you want
# Note: import/
self.output_tensor = sess.graph.get_tensor_by_name('import/net_vlad_layer_1/l2_normalize_1:0')
self.sess = K.get_session()
# Doing this is a hack to force keras to allocate GPU memory. Don't comment this,
print ('Allocating GPU Memory...')
# Sample Prediction
tmp_zer = np.zeros( (1,self.im_rows,self.im_cols,self.im_chnls), dtype='float32' )
tmp_zer_out = self.sess.run(self.output_tensor, {'import/input_1:0': tmp_zer})
print( 'model input.shape=', tmp_zer.shape, '\toutput.shape=', tmp_zer_out.shape )
print( 'model_type=', self.model_type )
print( '-----' )
print( '\tinput_image.shape=', tmp_zer.shape )
print( '\toutput.shape=', tmp_zer_out.shape )
print( '\tminmax(tmp_zer_out)=', np.min( tmp_zer_out ), np.max( tmp_zer_out ) )
print( '\tnorm=', np.linalg.norm( tmp_zer_out ) )
print( '\tdtype=', tmp_zer_out.dtype )
print( '-----' )
print ( 'tmp_zer_out=', tmp_zer_out )
self.n_request_processed = 0
print( 'Constructor done in %4.2f sec ' %(time.time() - start_const ) )
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# quit()
def on_image_recv(self, msg):
self.queue.append(msg)
# print("Adding msg to queue", len(self.queue))
if len(self.queue) > QUEUE_SIZE:
del self.queue[0]
def pop_image_by_timestamp(self, stamp):
print("Find...", stamp, "queue_size", len(self.queue), "lag is", (self.queue[-1].header.stamp.to_sec() - stamp.to_sec())*1000, "ms")
index = -1
for i in range(len(self.queue)):
if math.fabs(self.queue[i].header.stamp.to_sec() - stamp.to_sec()) < 0.001:
index = i
break
if index >= 0:
|
dt_last = self.queue[-1].header.stamp.to_sec() - stamp.to_sec()
rospy.logwarn("Finding failed, dt is {:3.2f}ms; If this number > 0 means swarm_loop is too slow".format(dt_last)*1000)
if dt_last < 0:
return None, 1
return None, 0
def handle_req( self, req ):
""" The received image from CV bridge has to be [0,255]. In function makes it to
intensity range [-1 to 1]
"""
start_time_handle = time.time()
stamp = req.stamp.data
cv_image = None
for i in range(3):
cv_image, fail = self.pop_image_by_timestamp(stamp)
if cv_image is None and fail == 0:
rospy.logerr("Unable find image swarm loop too slow!")
result = WholeImageDescriptorComputeTSResponse()
return result
else:
if fail == 1:
print("Wait 0.02 sec for image come in and re find image")
rospy.sleep(0.02)
cv_image, fail = self.pop_image_by_timestamp(stamp)
else:
break
if cv_image is None:
rospy.logerr("Unable to find such image")
result = WholeImageDescriptorComputeTSResponse()
return result
# print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\ta=', req.a, '\tt=', stamp )
if len(cv_image.shape)==2:
# print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'
cv_image = np.expand_dims( cv_image, -1 )
elif len( cv_image.shape )==3:
pass
else:
assert False
assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \
"\n[whole_image_descriptor_compute_server] Input shape of the image \
does not match with the allocated GPU memory. Expecting an input image of \
size %dx%dx%d, but received : %s" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )
## Compute Descriptor
start_time = time.time()
i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]
print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
# u = self.model.predict( i__image )
with self.sess.as_default():
with self.sess.graph.as_default():
# u = self.model.predict( i__image )
u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})
print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )
# print( '\tinput_image.shape=', cv_image.shape, )
# print( '\tinput_image dtype=', cv_image.dtype )
# print( tcol.OKBLUE, '\tinput image (to neuralnet) minmax=', np.min( i__image ), np.max( i__image ), tcol.ENDC )
# print( '\tdesc.shape=', u.shape, )
# print( '\tdesc minmax=', np.min( u ), np.max( u ), )
# print( '\tnorm=', np.linalg.norm(u[0]) )
# print( '\tmodel_type=', self.model_type )
## Populate output message
result = WholeImageDescriptorComputeTSResponse()
# result.desc = [ cv_image.shape[0], cv_image.shape[1] ]
result.desc = u[0,:]
result.model_type = self.model_type
print( '[ProtoBufferModelImageDescriptor Handle Request] Callback returned in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
return result
if __name__ == '__main__':
rospy.init_node( 'whole_image_descriptor_compute_server' )
##
## Load the config file and read image row, col
##
fs_image_width = -1
fs_image_height = -1
fs_image_chnls = 1
fs_image_height = rospy.get_param('~nrows')
fs_image_width = rospy.get_param('~ncols')
print ( '~~~~~~~~~~~~~~~~' )
print ( '~nrows = ', fs_image_height, '\t~ncols = ', fs_image_width, '\t~nchnls = ', fs_image_chnls )
print ( '~~~~~~~~~~~~~~~~' )
print( '~~~@@@@ OK...' )
sys.stdout.flush()
if rospy.has_param( '~frozen_protobuf_file'):
frozen_protobuf_file = rospy.get_param('~frozen_protobuf_file')
else:
print( tcol.FAIL, 'FATAL...missing specification of model file. You need to specify ~frozen_protobuf_file', tcol.ENDC )
quit()
gpu_netvlad = ProtoBufferModelImageDescriptor( frozen_protobuf_file=frozen_protobuf_file, im_rows=fs_image_height, im_cols=fs_image_width, im_chnls=fs_image_chnls )
s = rospy.Service( 'whole_image_descriptor_compute_ts', WholeImageDescriptorComputeTS, gpu_netvlad.handle_req)
sub = rospy.Subscriber("left_camera", Image, gpu_netvlad.on_image_recv, queue_size=20, tcp_nodelay=True)
print (tcol.OKGREEN )
print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print( '+++ whole_image_descriptor_compute_server is running +++' )
print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print( tcol.ENDC )
rospy.spin()
| cv_image = imgmsg_to_cv2( self.queue[index] )
del self.queue[0:index+1]
if cv_image.shape[0] != 240 or cv_image.shape[1] != 320:
cv_image = cv2.resize(cv_image, (320, 240))
return cv_image, 0 | conditional_block |
py2_whole_image_desc_server_ts.py | #!/usr/bin/env python
# Tensorflow
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.platform import gfile
from sensor_msgs.msg import Image
# OpenCV
import cv2
from cv_bridge import CvBridge, CvBridgeError
# Misc Python packages
import numpy as np
import os
import time
import sys
# ROS
import rospy
import math
# Pkg msg definations
from tx2_whole_image_desc_server.srv import WholeImageDescriptorComputeTS, WholeImageDescriptorComputeTSResponse
from TerminalColors import bcolors
tcol = bcolors()
QUEUE_SIZE = 200
def imgmsg_to_cv2( msg ):
assert msg.encoding == "8UC3" or msg.encoding == "8UC1" or msg.encoding == "bgr8" or msg.encoding == "mono8", \
"Expecting the msg to have encoding as 8UC3 or 8UC1, received"+ str( msg.encoding )
if msg.encoding == "8UC3" or msg.encoding=='bgr8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, -1)
return X
| return X
class ProtoBufferModelImageDescriptor:
"""
This class loads the network from a frozen TensorFlow protobuf (.pb) file, which
contains the graph structure as well as the trained weights.
In the argument `frozen_protobuf_file`
you need to specify the full path to the frozen graph file.
"""
def __init__(self, frozen_protobuf_file, im_rows=600, im_cols=960, im_chnls=3):
start_const = time.time()
# return
## Build net
# from keras.backend.tensorflow_backend import set_session
# tf.set_random_seed(42)
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.15
config.gpu_options.visible_device_list = "0"
config.intra_op_parallelism_threads=1
config.gpu_options.allow_growth=True
tf.keras.backend.set_session(tf.Session(config=config))
tf.keras.backend.set_learning_phase(0)
self.sess = tf.keras.backend.get_session()
self.queue = []
self.im_rows = int(im_rows)
self.im_cols = int(im_cols)
self.im_chnls = int(im_chnls)
LOG_DIR = '/'.join( frozen_protobuf_file.split('/')[0:-1] )
print( '+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
print( '++++++++++ (HDF5ModelImageDescriptor) LOG_DIR=', LOG_DIR )
print( '++++++++++ im_rows=', im_rows, ' im_cols=', im_cols, ' im_chnls=', im_chnls )
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
model_type = LOG_DIR.split('/')[-1]
self.model_type = model_type
assert os.path.isdir( LOG_DIR ), "The LOG_DIR doesnot exist, or there is a permission issue. LOG_DIR="+LOG_DIR
assert os.path.isfile( frozen_protobuf_file ), 'The model weights file doesnot exists or there is a permission issue.'+"frozen_protobuf_file="+frozen_protobuf_file
#---
# Load .pb (protobuf file)
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# f = gfile.FastGFile(frozen_protobuf_file, 'rb')
f = tf.gfile.GFile( frozen_protobuf_file, 'rb')
graph_def = tf.GraphDef()
# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
#---
# Setup computation graph
print( 'Setup computational graph')
start_t = time.time()
sess = K.get_session()
sess.graph.as_default()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
print( 'Setup computational graph done in %4.2f sec ' %(time.time() - start_t ) )
#--
# Output Tensor.
# Note: the :0. Without :0 it will mean the operator, which is not what you want
# Note: import/
self.output_tensor = sess.graph.get_tensor_by_name('import/net_vlad_layer_1/l2_normalize_1:0')
self.sess = K.get_session()
# Doing this is a hack to force keras to allocate GPU memory. Don't comment this,
print ('Allocating GPU Memory...')
# Sample Prediction
tmp_zer = np.zeros( (1,self.im_rows,self.im_cols,self.im_chnls), dtype='float32' )
tmp_zer_out = self.sess.run(self.output_tensor, {'import/input_1:0': tmp_zer})
print( 'model input.shape=', tmp_zer.shape, '\toutput.shape=', tmp_zer_out.shape )
print( 'model_type=', self.model_type )
print( '-----' )
print( '\tinput_image.shape=', tmp_zer.shape )
print( '\toutput.shape=', tmp_zer_out.shape )
print( '\tminmax(tmp_zer_out)=', np.min( tmp_zer_out ), np.max( tmp_zer_out ) )
print( '\tnorm=', np.linalg.norm( tmp_zer_out ) )
print( '\tdtype=', tmp_zer_out.dtype )
print( '-----' )
print ( 'tmp_zer_out=', tmp_zer_out )
self.n_request_processed = 0
print( 'Constructor done in %4.2f sec ' %(time.time() - start_const ) )
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# quit()
def on_image_recv(self, msg):
self.queue.append(msg)
# print("Adding msg to queue", len(self.queue))
if len(self.queue) > QUEUE_SIZE:
del self.queue[0]
def pop_image_by_timestamp(self, stamp):
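# Return convention: (image, 0) when a frame within 1 ms of `stamp` is found;
# (None, 1) when the newest queued frame is still older than `stamp`, so the
# caller may wait and retry; (None, 0) when the frame was already dropped.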
print("Find...", stamp, "queue_size", len(self.queue), "lag is", (self.queue[-1].header.stamp.to_sec() - stamp.to_sec())*1000, "ms")
index = -1
for i in range(len(self.queue)):
if math.fabs(self.queue[i].header.stamp.to_sec() - stamp.to_sec()) < 0.001:
index = i
break
if index >= 0:
cv_image = imgmsg_to_cv2( self.queue[index] )
del self.queue[0:index+1]
if cv_image.shape[0] != 240 or cv_image.shape[1] != 320:
cv_image = cv2.resize(cv_image, (320, 240))
return cv_image, 0
dt_last = self.queue[-1].header.stamp.to_sec() - stamp.to_sec()
rospy.logwarn("Finding failed, dt is {:3.2f}ms; If this number > 0 means swarm_loop is too slow".format(dt_last)*1000)
if dt_last < 0:
return None, 1
return None, 0
def handle_req( self, req ):
""" The received image from CV bridge has to be [0,255]. In function makes it to
intensity range [-1 to 1]
"""
start_time_handle = time.time()
stamp = req.stamp.data
cv_image = None
for i in range(3):
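# Up to three attempts: if the image has not arrived yet (fail == 1) wait 20 ms
# and retry; if it has already been dropped from the queue (fail == 0) give up.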
cv_image, fail = self.pop_image_by_timestamp(stamp)
if cv_image is None and fail == 0:
rospy.logerr("Unable find image swarm loop too slow!")
result = WholeImageDescriptorComputeTSResponse()
return result
else:
if fail == 1:
print("Wait 0.02 sec for image come in and re find image")
rospy.sleep(0.02)
cv_image, fail = self.pop_image_by_timestamp(stamp)
else:
break
if cv_image is None:
rospy.logerr("Unable to find such image")
result = WholeImageDescriptorComputeTSResponse()
return result
# print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\ta=', req.a, '\tt=', stamp )
if len(cv_image.shape)==2:
# print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'
cv_image = np.expand_dims( cv_image, -1 )
elif len( cv_image.shape )==3:
pass
else:
assert False
assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \
"\n[whole_image_descriptor_compute_server] Input shape of the image \
does not match with the allocated GPU memory. Expecting an input image of \
size %dx%dx%d, but received : %s" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )
## Compute Descriptor
start_time = time.time()
i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]
print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
# u = self.model.predict( i__image )
with self.sess.as_default():
with self.sess.graph.as_default():
# u = self.model.predict( i__image )
u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})
print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )
# print( '\tinput_image.shape=', cv_image.shape, )
# print( '\tinput_image dtype=', cv_image.dtype )
# print( tcol.OKBLUE, '\tinput image (to neuralnet) minmax=', np.min( i__image ), np.max( i__image ), tcol.ENDC )
# print( '\tdesc.shape=', u.shape, )
# print( '\tdesc minmax=', np.min( u ), np.max( u ), )
# print( '\tnorm=', np.linalg.norm(u[0]) )
# print( '\tmodel_type=', self.model_type )
## Populate output message
result = WholeImageDescriptorComputeTSResponse()
# result.desc = [ cv_image.shape[0], cv_image.shape[1] ]
result.desc = u[0,:]
result.model_type = self.model_type
print( '[ProtoBufferModelImageDescriptor Handle Request] Callback returned in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
return result
if __name__ == '__main__':
rospy.init_node( 'whole_image_descriptor_compute_server' )
##
## Load the config file and read image row, col
##
fs_image_width = -1
fs_image_height = -1
fs_image_chnls = 1
fs_image_height = rospy.get_param('~nrows')
fs_image_width = rospy.get_param('~ncols')
print ( '~~~~~~~~~~~~~~~~' )
print ( '~nrows = ', fs_image_height, '\t~ncols = ', fs_image_width, '\t~nchnls = ', fs_image_chnls )
print ( '~~~~~~~~~~~~~~~~' )
print( '~~~@@@@ OK...' )
sys.stdout.flush()
if rospy.has_param( '~frozen_protobuf_file'):
frozen_protobuf_file = rospy.get_param('~frozen_protobuf_file')
else:
print( tcol.FAIL, 'FATAL...missing specification of model file. You need to specify ~frozen_protobuf_file', tcol.ENDC )
quit()
gpu_netvlad = ProtoBufferModelImageDescriptor( frozen_protobuf_file=frozen_protobuf_file, im_rows=fs_image_height, im_cols=fs_image_width, im_chnls=fs_image_chnls )
s = rospy.Service( 'whole_image_descriptor_compute_ts', WholeImageDescriptorComputeTS, gpu_netvlad.handle_req)
sub = rospy.Subscriber("left_camera", Image, gpu_netvlad.on_image_recv, queue_size=20, tcp_nodelay=True)
print (tcol.OKGREEN )
print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print( '+++ whole_image_descriptor_compute_server is running +++' )
print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print( tcol.ENDC )
rospy.spin() | if msg.encoding == "8UC1" or msg.encoding=='mono8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width) | random_line_split |
py2_whole_image_desc_server_ts.py | #!/usr/bin/env python
# Tensorflow
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.platform import gfile
from sensor_msgs.msg import Image
# OpenCV
import cv2
from cv_bridge import CvBridge, CvBridgeError
# Misc Python packages
import numpy as np
import os
import time
import sys
# ROS
import rospy
import math
# Pkg msg definitions
from tx2_whole_image_desc_server.srv import WholeImageDescriptorComputeTS, WholeImageDescriptorComputeTSResponse
from TerminalColors import bcolors
tcol = bcolors()
QUEUE_SIZE = 200
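# Incoming images are buffered (up to QUEUE_SIZE messages) so that a later
# service request can be matched against them purely by timestamp.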
def imgmsg_to_cv2( msg ):
assert msg.encoding == "8UC3" or msg.encoding == "8UC1" or msg.encoding == "bgr8" or msg.encoding == "mono8", \
"Expecting the msg to have encoding as 8UC3 or 8UC1, received"+ str( msg.encoding )
if msg.encoding == "8UC3" or msg.encoding=='bgr8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, -1)
return X
if msg.encoding == "8UC1" or msg.encoding=='mono8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width)
return X
class ProtoBufferModelImageDescriptor:
"""
This class loads the network from a frozen protobuf (.pb) file. The file
contains the model weights as well as the architecture details.
In the argument `frozen_protobuf_file`
you need to specify the full path to the frozen graph.
"""
def __init__(self, frozen_protobuf_file, im_rows=600, im_cols=960, im_chnls=3):
start_const = time.time()
# return
## Build net
# from keras.backend.tensorflow_backend import set_session
# tf.set_random_seed(42)
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.15
config.gpu_options.visible_device_list = "0"
config.intra_op_parallelism_threads=1
config.gpu_options.allow_growth=True
tf.keras.backend.set_session(tf.Session(config=config))
tf.keras.backend.set_learning_phase(0)
self.sess = tf.keras.backend.get_session()
self.queue = []
self.im_rows = int(im_rows)
self.im_cols = int(im_cols)
self.im_chnls = int(im_chnls)
LOG_DIR = '/'.join( frozen_protobuf_file.split('/')[0:-1] )
print( '+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
print( '++++++++++ (ProtoBufferModelImageDescriptor) LOG_DIR=', LOG_DIR )
print( '++++++++++ im_rows=', im_rows, ' im_cols=', im_cols, ' im_chnls=', im_chnls )
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
model_type = LOG_DIR.split('/')[-1]
self.model_type = model_type
assert os.path.isdir( LOG_DIR ), "The LOG_DIR does not exist, or there is a permission issue. LOG_DIR="+LOG_DIR
assert os.path.isfile( frozen_protobuf_file ), 'The model weights file does not exist, or there is a permission issue. '+"frozen_protobuf_file="+frozen_protobuf_file
#---
# Load .pb (protobuf file)
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# f = gfile.FastGFile(frozen_protobuf_file, 'rb')
f = tf.gfile.GFile( frozen_protobuf_file, 'rb')
graph_def = tf.GraphDef()
# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
#---
# Setup computation graph
print( 'Setup computational graph')
start_t = time.time()
sess = K.get_session()
sess.graph.as_default()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
print( 'Setup computational graph done in %4.2f sec ' %(time.time() - start_t ) )
#--
# Output Tensor.
# Note: the :0. Without :0 it will mean the operator, which is not what you want
# Note: import/
self.output_tensor = sess.graph.get_tensor_by_name('import/net_vlad_layer_1/l2_normalize_1:0')
self.sess = K.get_session()
# Doing this is a hack to force keras to allocate GPU memory. Don't comment this,
print ('Allocating GPU Memory...')
# Sample Prediction
tmp_zer = np.zeros( (1,self.im_rows,self.im_cols,self.im_chnls), dtype='float32' )
tmp_zer_out = self.sess.run(self.output_tensor, {'import/input_1:0': tmp_zer})
print( 'model input.shape=', tmp_zer.shape, '\toutput.shape=', tmp_zer_out.shape )
print( 'model_type=', self.model_type )
print( '-----' )
print( '\tinput_image.shape=', tmp_zer.shape )
print( '\toutput.shape=', tmp_zer_out.shape )
print( '\tminmax(tmp_zer_out)=', np.min( tmp_zer_out ), np.max( tmp_zer_out ) )
print( '\tnorm=', np.linalg.norm( tmp_zer_out ) )
print( '\tdtype=', tmp_zer_out.dtype )
print( '-----' )
print ( 'tmp_zer_out=', tmp_zer_out )
self.n_request_processed = 0
print( 'Constructor done in %4.2f sec ' %(time.time() - start_const ) )
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# quit()
def on_image_recv(self, msg):
self.queue.append(msg)
# print("Adding msg to queue", len(self.queue))
if len(self.queue) > QUEUE_SIZE:
del self.queue[0]
def pop_image_by_timestamp(self, stamp):
|
def handle_req( self, req ):
""" The received image from CV bridge has to be [0,255]. In function makes it to
intensity range [-1 to 1]
"""
start_time_handle = time.time()
stamp = req.stamp.data
cv_image = None
for i in range(3):
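# Retry loop: fail == 1 means the requested frame has not arrived yet, so wait
# briefly and try again; fail == 0 with no image means it was already dropped.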
cv_image, fail = self.pop_image_by_timestamp(stamp)
if cv_image is None and fail == 0:
rospy.logerr("Unable find image swarm loop too slow!")
result = WholeImageDescriptorComputeTSResponse()
return result
else:
if fail == 1:
print("Wait 0.02 sec for image come in and re find image")
rospy.sleep(0.02)
cv_image, fail = self.pop_image_by_timestamp(stamp)
else:
break
if cv_image is None:
rospy.logerr("Unable to find such image")
result = WholeImageDescriptorComputeTSResponse()
return result
# print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\ta=', req.a, '\tt=', stamp )
if len(cv_image.shape)==2:
# print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'
cv_image = np.expand_dims( cv_image, -1 )
elif len( cv_image.shape )==3:
pass
else:
assert False
assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \
"\n[whole_image_descriptor_compute_server] Input shape of the image \
does not match with the allocated GPU memory. Expecting an input image of \
size %dx%dx%d, but received : %s" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )
## Compute Descriptor
start_time = time.time()
i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]
print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
# u = self.model.predict( i__image )
with self.sess.as_default():
with self.sess.graph.as_default():
# u = self.model.predict( i__image )
u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})
print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )
# print( '\tinput_image.shape=', cv_image.shape, )
# print( '\tinput_image dtype=', cv_image.dtype )
# print( tcol.OKBLUE, '\tinput image (to neuralnet) minmax=', np.min( i__image ), np.max( i__image ), tcol.ENDC )
# print( '\tdesc.shape=', u.shape, )
# print( '\tdesc minmax=', np.min( u ), np.max( u ), )
# print( '\tnorm=', np.linalg.norm(u[0]) )
# print( '\tmodel_type=', self.model_type )
## Populate output message
result = WholeImageDescriptorComputeTSResponse()
# result.desc = [ cv_image.shape[0], cv_image.shape[1] ]
result.desc = u[0,:]
result.model_type = self.model_type
print( '[ProtoBufferModelImageDescriptor Handle Request] Callback returned in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
return result
if __name__ == '__main__':
rospy.init_node( 'whole_image_descriptor_compute_server' )
##
## Load the config file and read image row, col
##
fs_image_width = -1
fs_image_height = -1
fs_image_chnls = 1
fs_image_height = rospy.get_param('~nrows')
fs_image_width = rospy.get_param('~ncols')
print ( '~~~~~~~~~~~~~~~~' )
print ( '~nrows = ', fs_image_height, '\t~ncols = ', fs_image_width, '\t~nchnls = ', fs_image_chnls )
print ( '~~~~~~~~~~~~~~~~' )
print( '~~~@@@@ OK...' )
sys.stdout.flush()
if rospy.has_param( '~frozen_protobuf_file'):
frozen_protobuf_file = rospy.get_param('~frozen_protobuf_file')
else:
print( tcol.FAIL, 'FATAL...missing specification of model file. You need to specify ~frozen_protobuf_file', tcol.ENDC )
quit()
gpu_netvlad = ProtoBufferModelImageDescriptor( frozen_protobuf_file=frozen_protobuf_file, im_rows=fs_image_height, im_cols=fs_image_width, im_chnls=fs_image_chnls )
s = rospy.Service( 'whole_image_descriptor_compute_ts', WholeImageDescriptorComputeTS, gpu_netvlad.handle_req)
sub = rospy.Subscriber("left_camera", Image, gpu_netvlad.on_image_recv, queue_size=20, tcp_nodelay=True)
print (tcol.OKGREEN )
print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print( '+++ whole_image_descriptor_compute_server is running +++' )
print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print( tcol.ENDC )
rospy.spin()
| print("Find...", stamp, "queue_size", len(self.queue), "lag is", (self.queue[-1].header.stamp.to_sec() - stamp.to_sec())*1000, "ms")
index = -1
for i in range(len(self.queue)):
if math.fabs(self.queue[i].header.stamp.to_sec() - stamp.to_sec()) < 0.001:
index = i
break
if index >= 0:
cv_image = imgmsg_to_cv2( self.queue[index] )
del self.queue[0:index+1]
if cv_image.shape[0] != 240 or cv_image.shape[1] != 320:
cv_image = cv2.resize(cv_image, (320, 240))
return cv_image, 0
dt_last = self.queue[-1].header.stamp.to_sec() - stamp.to_sec()
rospy.logwarn("Finding failed, dt is {:3.2f}ms; If this number > 0 means swarm_loop is too slow".format(dt_last)*1000)
if dt_last < 0:
return None, 1
return None, 0 | identifier_body |
vaultdb_test.go | package vaultdb
import (
"context"
"database/sql"
"fmt"
"os"
"runtime/pprof"
"strings"
"testing"
"time"
// "bg/common/briefpg"
vaultapi "github.com/hashicorp/vault/api"
logicalDb "github.com/hashicorp/vault/builtin/logical/database"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
_ "github.com/lib/pq"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
// testVaultServer is based largely on testVaultServerCoreConfig from
// command/command_test.go in the vault repo.
func testVaultServer(t *testing.T) (*vaultapi.Client, func()) {
coreConfig := &vault.CoreConfig{
DisableMlock: true,
DisableCache: true,
LogicalBackends: map[string]logical.Factory{
"database": logicalDb.Factory,
},
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: 1,
})
cluster.Start()
core := cluster.Cores[0].Core
vault.TestWaitActive(t, core)
client := cluster.Cores[0].Client
client.SetToken(cluster.RootToken)
return client, func() { defer cluster.Cleanup() }
}
type vaultConfig struct {
dbURI string
path string
vcl *vaultapi.Logical
}
func (vconf vaultConfig) createRole(t *testing.T, role string, ttl, maxTTL int) {
_, err := vconf.vcl.Write(vconf.path+"/config/db", map[string]interface{}{
"allowed_roles": role,
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
// Create a role in Vault that is configured to create a Postgres role
// with all privileges.
createSQL := `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
revokeSQL := `
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '{{name}}';
DROP ROLE IF EXISTS "{{name}}";
`
// XXX Should the force-terminate version be optional?
_, err = vconf.vcl.Write(vconf.path+"/roles/"+role, map[string]interface{}{
"db_name": "db",
"default_ttl": ttl,
"max_ttl": maxTTL,
"creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) |
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to indicate that each statement uses a
// brand new connection, and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
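// A 2s default TTL with a 5s max TTL keeps the test short: the lease can be
// renewed once before it hits its maximum lifetime and must be rotated below.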
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
go watcher.Start()
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test3', 'test4')`)
assert.NoError(err)
owners := make(map[string]string)
for rows.Next() {
var owner, table string
err = rows.Scan(&table, &owner)
assert.NoError(err)
owners[table] = owner
}
assert.NotEqual(owners["test2"], owners["test3"])
assert.Equal(owners["test3"], owners["test4"])
}
// testMultiVDBC tests two things. One is when authentication to Vault is done
// with a time-limited token, that sub-leases (such as database credentials) are
// appropriately expired and new credentials can be retrieved under the new auth
// token. The second is that we can have more than one Connector based on a
// single vault client and that the authentication notification doesn't fall
// into any deadlocks when we get a new auth token.
func testMultiVDBC(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
vconf.createRole(t, role, 2, 5)
notifier, stopChan := fakeVaultAuth(t, vc)
defer func() { stopChan <- struct{}{} }()
vdbc1 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc1").Sugar())
vdbc2 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc2").Sugar())
db1 := sql.OpenDB(vdbc1)
db1.SetMaxOpenConns(1)
db1.SetMaxIdleConns(0)
db2 := sql.OpenDB(vdbc2)
db2.SetMaxOpenConns(1)
db2.SetMaxIdleConns(0)
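// Ping for 5 seconds, well past the 2s auth-token TTL used by fakeVaultAuth,
// so both connectors must survive several re-authentications without deadlocking.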
start := time.Now()
end := start.Add(5 * time.Second)
for time.Now().Before(end) {
err := db1.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
err = db2.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
}
}
func testCredentialRevocation(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
// assert := require.New(t)
role := "something"
vconf.createRole(t, role, 1, 1)
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Named("something").Sugar())
db := sql.OpenDB(vdbc)
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This sleep should be interrupted by the revocation statements
// terminating the session, but they never seem to get executed.
start := time.Now()
ch := make(chan error)
go func() {
_, err := db.Exec("SELECT pg_sleep(3)")
ch <- err
}()
time.Sleep(500 * time.Millisecond)
// We see a stack with the watcher in it here
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
time.Sleep(1000 * time.Millisecond)
fmt.Println("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
// But not here, since the watcher has completed, and we haven't been
// asked for a new secret, with a new watcher.
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
err := <-ch
t.Log(time.Since(start))
t.Log(err)
}
func TestEmAll(t *testing.T) {
var ctx = context.Background()
// Set up the database
bpg := briefpg.New(nil)
if err := bpg.Start(ctx); err != nil {
t.Fatalf("Failed to start Postgres: %v", err)
}
defer bpg.Fini(ctx)
testCases := []struct {
name string
tFunc func(*testing.T, *vaultapi.Client, vaultConfig)
}{
{"testDBSecrets", testDBSecrets},
{"testMultiVDBC", testMultiVDBC},
{"testCredentialRevocation", testCredentialRevocation},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
vc, vStop := testVaultServer(t)
defer vStop()
vconf := setupVault(t, vc, bpg)
tc.tFunc(t, vc, vconf)
})
}
}
func TestMain(m *testing.M) {
os.Exit(m.Run())
}
| {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
go func() {
for {
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
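// Re-create the child token at 3/4 of its TTL so the replacement is installed
// (and subscribers notified) before the old token can expire.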
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
} | identifier_body |
vaultdb_test.go | package vaultdb
import (
"context"
"database/sql"
"fmt"
"os"
"runtime/pprof"
"strings"
"testing"
"time"
// "bg/common/briefpg"
vaultapi "github.com/hashicorp/vault/api"
logicalDb "github.com/hashicorp/vault/builtin/logical/database"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
_ "github.com/lib/pq"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
// testVaultServer is based largely on testVaultServerCoreConfig from
// command/command_test.go in the vault repo.
func testVaultServer(t *testing.T) (*vaultapi.Client, func()) {
coreConfig := &vault.CoreConfig{
DisableMlock: true,
DisableCache: true,
LogicalBackends: map[string]logical.Factory{
"database": logicalDb.Factory,
},
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: 1,
})
cluster.Start()
core := cluster.Cores[0].Core
vault.TestWaitActive(t, core)
client := cluster.Cores[0].Client
client.SetToken(cluster.RootToken)
return client, func() { defer cluster.Cleanup() }
}
type vaultConfig struct {
dbURI string
path string
vcl *vaultapi.Logical
}
func (vconf vaultConfig) createRole(t *testing.T, role string, ttl, maxTTL int) {
_, err := vconf.vcl.Write(vconf.path+"/config/db", map[string]interface{}{
"allowed_roles": role,
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
// Create a role in Vault that is configured to create a Postgres role
// with all privileges.
createSQL := `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
revokeSQL := `
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '{{name}}';
DROP ROLE IF EXISTS "{{name}}";
`
// XXX Should the force-terminate version be optional?
_, err = vconf.vcl.Write(vconf.path+"/roles/"+role, map[string]interface{}{
"db_name": "db",
"default_ttl": ttl,
"max_ttl": maxTTL,
"creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
go func() {
for {
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
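// Rotate the child token at 3/4 of its TTL; notify() then tells any listening
// Connector that the Vault token has changed.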
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
}
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to indicate that each statement uses a
// brand new connection, and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
go watcher.Start()
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test3', 'test4')`)
assert.NoError(err)
owners := make(map[string]string)
for rows.Next() {
var owner, table string
err = rows.Scan(&table, &owner)
assert.NoError(err)
owners[table] = owner
}
assert.NotEqual(owners["test2"], owners["test3"])
assert.Equal(owners["test3"], owners["test4"])
}
// testMultiVDBC tests two things. One is when authentication to Vault is done
// with a time-limited token, that sub-leases (such as database credentials) are
// appropriately expired and new credentials can be retrieved under the new auth
// token. The second is that we can have more than one Connector based on a
// single vault client and that the authentication notification doesn't fall
// into any deadlocks when we get a new auth token.
func | (t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
vconf.createRole(t, role, 2, 5)
notifier, stopChan := fakeVaultAuth(t, vc)
defer func() { stopChan <- struct{}{} }()
vdbc1 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc1").Sugar())
vdbc2 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc2").Sugar())
db1 := sql.OpenDB(vdbc1)
db1.SetMaxOpenConns(1)
db1.SetMaxIdleConns(0)
db2 := sql.OpenDB(vdbc2)
db2.SetMaxOpenConns(1)
db2.SetMaxIdleConns(0)
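// Alternate pings across both connectors for longer than the token TTL so the
// shared auth-notification path is exercised by several rotations.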
start := time.Now()
end := start.Add(5 * time.Second)
for time.Now().Before(end) {
err := db1.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
err = db2.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
}
}
func testCredentialRevocation(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
// assert := require.New(t)
role := "something"
vconf.createRole(t, role, 1, 1)
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Named("something").Sugar())
db := sql.OpenDB(vdbc)
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This sleep should be interrupted by the revocation statements
// terminating the session, but they never seem to get executed.
start := time.Now()
ch := make(chan error)
go func() {
_, err := db.Exec("SELECT pg_sleep(3)")
ch <- err
}()
time.Sleep(500 * time.Millisecond)
// We see a stack with the watcher in it here
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
time.Sleep(1000 * time.Millisecond)
fmt.Println("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
// But not here, since the watcher has completed, and we haven't been
// asked for a new secret, with a new watcher.
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
err := <-ch
t.Log(time.Since(start))
t.Log(err)
}
func TestEmAll(t *testing.T) {
var ctx = context.Background()
// Set up the database
bpg := briefpg.New(nil)
if err := bpg.Start(ctx); err != nil {
t.Fatalf("Failed to start Postgres: %v", err)
}
defer bpg.Fini(ctx)
testCases := []struct {
name string
tFunc func(*testing.T, *vaultapi.Client, vaultConfig)
}{
{"testDBSecrets", testDBSecrets},
{"testMultiVDBC", testMultiVDBC},
{"testCredentialRevocation", testCredentialRevocation},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
vc, vStop := testVaultServer(t)
defer vStop()
vconf := setupVault(t, vc, bpg)
tc.tFunc(t, vc, vconf)
})
}
}
func TestMain(m *testing.M) {
os.Exit(m.Run())
}
| testMultiVDBC | identifier_name |
vaultdb_test.go | package vaultdb
import (
"context"
"database/sql"
"fmt"
"os"
"runtime/pprof"
"strings"
"testing"
"time"
// "bg/common/briefpg"
vaultapi "github.com/hashicorp/vault/api"
logicalDb "github.com/hashicorp/vault/builtin/logical/database"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
_ "github.com/lib/pq"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
// testVaultServer is based largely on testVaultServerCoreConfig from
// command/command_test.go in the vault repo.
func testVaultServer(t *testing.T) (*vaultapi.Client, func()) {
coreConfig := &vault.CoreConfig{
DisableMlock: true,
DisableCache: true,
LogicalBackends: map[string]logical.Factory{
"database": logicalDb.Factory,
},
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: 1,
})
cluster.Start()
core := cluster.Cores[0].Core
vault.TestWaitActive(t, core)
client := cluster.Cores[0].Client
client.SetToken(cluster.RootToken)
return client, func() { defer cluster.Cleanup() }
}
type vaultConfig struct {
dbURI string
path string
vcl *vaultapi.Logical
}
func (vconf vaultConfig) createRole(t *testing.T, role string, ttl, maxTTL int) {
_, err := vconf.vcl.Write(vconf.path+"/config/db", map[string]interface{}{
"allowed_roles": role,
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
// Create a role in Vault that is configured to create a Postgres role
// with all privileges.
createSQL := `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
revokeSQL := `
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '{{name}}';
DROP ROLE IF EXISTS "{{name}}";
`
// XXX Should the force-terminate version be optional?
_, err = vconf.vcl.Write(vconf.path+"/roles/"+role, map[string]interface{}{
"db_name": "db",
"default_ttl": ttl,
"max_ttl": maxTTL,
"creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil |
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
go func() {
for {
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
}
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to indicate that each statement uses a
// brand new connection, and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
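// Short TTLs (2s default, 5s max) make the renewal and expiry paths below run
// within a few seconds of test time.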
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
go watcher.Start()
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test3', 'test4')`)
assert.NoError(err)
owners := make(map[string]string)
for rows.Next() {
var owner, table string
err = rows.Scan(&table, &owner)
assert.NoError(err)
owners[table] = owner
}
assert.NotEqual(owners["test2"], owners["test3"])
assert.Equal(owners["test3"], owners["test4"])
}
// testMultiVDBC tests two things. One is when authentication to Vault is done
// with a time-limited token, that sub-leases (such as database credentials) are
// appropriately expired and new credentials can be retrieved under the new auth
// token. The second is that we can have more than one Connector based on a
// single vault client and that the authentication notification doesn't fall
// into any deadlocks when we get a new auth token.
func testMultiVDBC(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
vconf.createRole(t, role, 2, 5)
notifier, stopChan := fakeVaultAuth(t, vc)
defer func() { stopChan <- struct{}{} }()
vdbc1 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc1").Sugar())
vdbc2 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc2").Sugar())
db1 := sql.OpenDB(vdbc1)
db1.SetMaxOpenConns(1)
db1.SetMaxIdleConns(0)
db2 := sql.OpenDB(vdbc2)
db2.SetMaxOpenConns(1)
db2.SetMaxIdleConns(0)
start := time.Now()
end := start.Add(5 * time.Second)
for time.Now().Before(end) {
err := db1.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
err = db2.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
}
}
func testCredentialRevocation(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
// assert := require.New(t)
role := "something"
vconf.createRole(t, role, 1, 1)
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Named("something").Sugar())
db := sql.OpenDB(vdbc)
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This sleep should be interrupted by the revocation statements
// terminating the session, but they never seem to get executed.
start := time.Now()
ch := make(chan error)
go func() {
_, err := db.Exec("SELECT pg_sleep(3)")
ch <- err
}()
time.Sleep(500 * time.Millisecond)
// We see a stack with the watcher in it here
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
time.Sleep(1000 * time.Millisecond)
fmt.Println("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
// But not here, since the watcher has completed, and we haven't been
// asked for a new secret, with a new watcher.
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
err := <-ch
t.Log(time.Since(start))
t.Log(err)
}
func TestEmAll(t *testing.T) {
var ctx = context.Background()
// Set up the database
bpg := briefpg.New(nil)
if err := bpg.Start(ctx); err != nil {
t.Fatalf("Failed to start Postgres: %v", err)
}
defer bpg.Fini(ctx)
testCases := []struct {
name string
tFunc func(*testing.T, *vaultapi.Client, vaultConfig)
}{
{"testDBSecrets", testDBSecrets},
{"testMultiVDBC", testMultiVDBC},
{"testCredentialRevocation", testCredentialRevocation},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
vc, vStop := testVaultServer(t)
defer vStop()
vconf := setupVault(t, vc, bpg)
tc.tFunc(t, vc, vconf)
})
}
}
func TestMain(m *testing.M) {
os.Exit(m.Run())
}
| {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
} | conditional_block |
vaultdb_test.go | package vaultdb
import (
"context"
"database/sql"
"fmt"
"os"
"runtime/pprof"
"strings"
"testing"
"time"
// "bg/common/briefpg"
vaultapi "github.com/hashicorp/vault/api"
logicalDb "github.com/hashicorp/vault/builtin/logical/database"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
_ "github.com/lib/pq"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
// testVaultServer is based largely on testVaultServerCoreConfig from
// command/command_test.go in the vault repo.
func testVaultServer(t *testing.T) (*vaultapi.Client, func()) {
coreConfig := &vault.CoreConfig{
DisableMlock: true,
DisableCache: true,
LogicalBackends: map[string]logical.Factory{
"database": logicalDb.Factory,
},
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: 1,
})
cluster.Start()
core := cluster.Cores[0].Core
vault.TestWaitActive(t, core)
client := cluster.Cores[0].Client
client.SetToken(cluster.RootToken)
return client, func() { defer cluster.Cleanup() }
}
type vaultConfig struct {
dbURI string
path string
vcl *vaultapi.Logical
}
func (vconf vaultConfig) createRole(t *testing.T, role string, ttl, maxTTL int) {
_, err := vconf.vcl.Write(vconf.path+"/config/db", map[string]interface{}{
"allowed_roles": role,
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
// Create a role in Vault that is configured to create a Postgres role
// with all privileges.
createSQL := `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
revokeSQL := `
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '{{name}}';
DROP ROLE IF EXISTS "{{name}}";
`
// XXX Should the force-terminate version be optional?
_, err = vconf.vcl.Write(vconf.path+"/roles/"+role, map[string]interface{}{ | "creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
go func() {
for {
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
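// Renewing at 3/4 of the TTL leaves a safety margin before the token expires.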
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
}
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to ensure that each statement uses a
// brand-new connection and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
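// The watcher renews the credential lease in the background; RenewCh fires on each renewal
// and DoneCh fires once the lease expires for good.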
go watcher.Start()
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test2', 'test3', 'test4')`)
assert.NoError(err)
owners := make(map[string]string)
for rows.Next() {
var owner, table string
err = rows.Scan(&table, &owner)
assert.NoError(err)
owners[table] = owner
}
assert.NotEqual(owners["test2"], owners["test3"])
assert.Equal(owners["test3"], owners["test4"])
}
// testMultiVDBC tests two things. One is when authentication to Vault is done
// with a time-limited token, that sub-leases (such as database credentials) are
// appropriately expired and new credentials can be retrieved under the new auth
// token. The second is that we can have more than one Connector based on a
// single vault client and that the authentication notification doesn't fall
// into any deadlocks when we get a new auth token.
func testMultiVDBC(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
vconf.createRole(t, role, 2, 5)
notifier, stopChan := fakeVaultAuth(t, vc)
defer func() { stopChan <- struct{}{} }()
vdbc1 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc1").Sugar())
vdbc2 := NewConnector(vconf.dbURI, vc, notifier, vconf.path, role,
zaptest.NewLogger(t).Named("vdbc2").Sugar())
db1 := sql.OpenDB(vdbc1)
db1.SetMaxOpenConns(1)
db1.SetMaxIdleConns(0)
db2 := sql.OpenDB(vdbc2)
db2.SetMaxOpenConns(1)
db2.SetMaxIdleConns(0)
start := time.Now()
end := start.Add(5 * time.Second)
for time.Now().Before(end) {
err := db1.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
err = db2.Ping()
assert.NoError(err)
time.Sleep(time.Second / 4)
}
}
func testCredentialRevocation(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
// assert := require.New(t)
role := "something"
vconf.createRole(t, role, 1, 1)
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Named("something").Sugar())
db := sql.OpenDB(vdbc)
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This sleep should be interrupted by the revocation statements
// terminating the session, but they never seem to get executed.
start := time.Now()
ch := make(chan error)
go func() {
_, err := db.Exec("SELECT pg_sleep(3)")
ch <- err
}()
time.Sleep(500 * time.Millisecond)
// We see a stack with the watcher in it here
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
time.Sleep(1000 * time.Millisecond)
fmt.Println("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
// But not here, since the watcher has completed, and we haven't been
// asked for a new secret, with a new watcher.
pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
err := <-ch
t.Log(time.Now().Sub(start))
t.Log(err)
}
func TestEmAll(t *testing.T) {
var ctx = context.Background()
// Set up the database
bpg := briefpg.New(nil)
if err := bpg.Start(ctx); err != nil {
t.Fatalf("Failed to start Postgres: %v", err)
}
defer bpg.Fini(ctx)
testCases := []struct {
name string
tFunc func(*testing.T, *vaultapi.Client, vaultConfig)
}{
{"testDBSecrets", testDBSecrets},
{"testMultiVDBC", testMultiVDBC},
{"testCredentialRevocation", testCredentialRevocation},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
vc, vStop := testVaultServer(t)
defer vStop()
vconf := setupVault(t, vc, bpg)
tc.tFunc(t, vc, vconf)
})
}
}
func TestMain(m *testing.M) {
os.Exit(m.Run())
} | "db_name": "db",
"default_ttl": ttl,
"max_ttl": maxTTL, | random_line_split |
helpers.py | from htm.encoders.rdse import RDSE, RDSE_Parameters
from htm.bindings.sdr import SDR
from collections import defaultdict
from nnmnkwii.preprocessing import trim_zeros_frames
from sklearn.metrics import f1_score, confusion_matrix, classification_report
from attrdict import AttrDict
from datetime import datetime
import os
import json
import random
import pysptk
import soundfile as sf
import torchaudio as ta
import numpy as np
import pyworld as pw
from layers import Layer, Unknown
from viz_util import plot_features
import param
def get_wavfile_list(path):
wav_files = []
for dirpath, subdirs, files in os.walk(path):
for x in files:
if x.endswith(".wav"):
wav_files.append(os.path.join(dirpath, x))
return wav_files
def get_features(x, fs):
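# Extract WORLD vocoder features: fundamental frequency (f0), mel-cepstrum (mcep) and band aperiodicity (bap).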
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def peak_normalize(data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
|
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
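# Sweep the 'unknown' threshold over every anomaly score observed on the training data:
# a clip falls back to the unknown class when all speaker models score above the threshold,
# and the threshold with the best macro-F1 is kept.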
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f))
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = Unknown(threshold)
else:
d[speaker] = self.create_model(speaker)
return d
def get_all_data(self, dataset):
return [data
for speaker in self.sp2idx
for data in dataset[speaker]]
def fit(self, epochs):
print("=====training phase=====")
for speaker in self.sp2idx.keys():
print("=" * 30 + "model of ", speaker, "=" * 30 + "\n")
model = self.models[speaker]
model.train()
train_data = self.train_dataset[speaker]
for epoch in range(epochs):
print("epoch {}".format(epoch))
for data in random.sample(train_data, len(train_data)):
self.experiment.execute(data, model)
fmt = "training data count: {}"
print(fmt.format(len(train_data)), end='\n\n')
all_train_data = self.get_all_data(self.train_dataset)
print("=====threshold optimization phase=====")
self.clf.optimize(all_train_data)
def evaluate(self):
print("=====testing phase=====")
all_test_data = self.get_all_data(self.test_dataset)
f1, cm, report = self.clf.score(all_test_data)
self.score = f1
fmt = "testing data count: {}"
print(fmt.format(len(all_test_data)), end='\n\n')
print("test threshold: ", self.models[self.unknown].threshold)
return f1, cm, report
def save(self):
if self.score < self.save_threshold:
return
dirname = '-'.join([datetime.now().isoformat(), f"{self.score:.2f}"])
if os.path.exists(dirname):
print("model path already exits.")
return
os.mkdir(dirname)
for speaker, model in self.models.items():
filename = os.path.join(dirname, speaker)
model.save(filename)
with open(os.path.join(dirname, 'setting.json'), 'w') as f:
self.setting["threshold"] = self.models[self.unknown].threshold
json.dump(self.setting, f, indent=4) | tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max() | identifier_body |
helpers.py | from htm.encoders.rdse import RDSE, RDSE_Parameters
from htm.bindings.sdr import SDR
from collections import defaultdict
from nnmnkwii.preprocessing import trim_zeros_frames
from sklearn.metrics import f1_score, confusion_matrix, classification_report
from attrdict import AttrDict
from datetime import datetime
import os
import json
import random
import pysptk
import soundfile as sf
import torchaudio as ta
import numpy as np
import pyworld as pw
from layers import Layer, Unknown
from viz_util import plot_features
import param
def get_wavfile_list(path):
wav_files = []
for dirpath, subdirs, files in os.walk(path):
for x in files:
if x.endswith(".wav"):
wav_files.append(os.path.join(dirpath, x))
return wav_files
def get_features(x, fs):
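# Extract WORLD vocoder features: fundamental frequency (f0), mel-cepstrum (mcep) and band aperiodicity (bap).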
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def | (data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max()
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
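# Sweep the 'unknown' threshold over every anomaly score observed on the training data:
# a clip falls back to the unknown class when all speaker models score above the threshold,
# and the threshold with the best macro-F1 is kept.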
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f))
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = Unknown(threshold)
else:
d[speaker] = self.create_model(speaker)
return d
def get_all_data(self, dataset):
return [data
for speaker in self.sp2idx
for data in dataset[speaker]]
def fit(self, epochs):
print("=====training phase=====")
for speaker in self.sp2idx.keys():
print("=" * 30 + "model of ", speaker, "=" * 30 + "\n")
model = self.models[speaker]
model.train()
train_data = self.train_dataset[speaker]
for epoch in range(epochs):
print("epoch {}".format(epoch))
for data in random.sample(train_data, len(train_data)):
self.experiment.execute(data, model)
fmt = "training data count: {}"
print(fmt.format(len(train_data)), end='\n\n')
all_train_data = self.get_all_data(self.train_dataset)
print("=====threshold optimization phase=====")
self.clf.optimize(all_train_data)
def evaluate(self):
print("=====testing phase=====")
all_test_data = self.get_all_data(self.test_dataset)
f1, cm, report = self.clf.score(all_test_data)
self.score = f1
fmt = "testing data count: {}"
print(fmt.format(len(all_test_data)), end='\n\n')
print("test threshold: ", self.models[self.unknown].threshold)
return f1, cm, report
def save(self):
if self.score < self.save_threshold:
return
dirname = '-'.join([datetime.now().isoformat(), f"{self.score:.2f}"])
if os.path.exists(dirname):
print("model path already exits.")
return
os.mkdir(dirname)
for speaker, model in self.models.items():
filename = os.path.join(dirname, speaker)
model.save(filename)
with open(os.path.join(dirname, 'setting.json'), 'w') as f:
self.setting["threshold"] = self.models[self.unknown].threshold
json.dump(self.setting, f, indent=4) | peak_normalize | identifier_name |
helpers.py | from htm.encoders.rdse import RDSE, RDSE_Parameters
from htm.bindings.sdr import SDR
from collections import defaultdict
from nnmnkwii.preprocessing import trim_zeros_frames
from sklearn.metrics import f1_score, confusion_matrix, classification_report
from attrdict import AttrDict
from datetime import datetime
import os
import json
import random
import pysptk
import soundfile as sf
import torchaudio as ta
import numpy as np
import pyworld as pw
from layers import Layer, Unknown
from viz_util import plot_features | def get_wavfile_list(path):
wav_files = []
for dirpath, subdirs, files in os.walk(path):
for x in files:
if x.endswith(".wav"):
wav_files.append(os.path.join(dirpath, x))
return wav_files
def get_features(x, fs):
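# Extract WORLD vocoder features: fundamental frequency (f0), mel-cepstrum (mcep) and band aperiodicity (bap).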
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def peak_normalize(data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max()
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
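# Sweep the 'unknown' threshold over every anomaly score observed on the training data:
# a clip falls back to the unknown class when all speaker models score above the threshold,
# and the threshold with the best macro-F1 is kept.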
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f))
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = Unknown(threshold)
else:
d[speaker] = self.create_model(speaker)
return d
def get_all_data(self, dataset):
return [data
for speaker in self.sp2idx
for data in dataset[speaker]]
def fit(self, epochs):
print("=====training phase=====")
for speaker in self.sp2idx.keys():
print("=" * 30 + "model of ", speaker, "=" * 30 + "\n")
model = self.models[speaker]
model.train()
train_data = self.train_dataset[speaker]
for epoch in range(epochs):
print("epoch {}".format(epoch))
for data in random.sample(train_data, len(train_data)):
self.experiment.execute(data, model)
fmt = "training data count: {}"
print(fmt.format(len(train_data)), end='\n\n')
all_train_data = self.get_all_data(self.train_dataset)
print("=====threshold optimization phase=====")
self.clf.optimize(all_train_data)
def evaluate(self):
print("=====testing phase=====")
all_test_data = self.get_all_data(self.test_dataset)
f1, cm, report = self.clf.score(all_test_data)
self.score = f1
fmt = "testing data count: {}"
print(fmt.format(len(all_test_data)), end='\n\n')
print("test threshold: ", self.models[self.unknown].threshold)
return f1, cm, report
def save(self):
if self.score < self.save_threshold:
return
dirname = '-'.join([datetime.now().isoformat(), f"{self.score:.2f}"])
if os.path.exists(dirname):
print("model path already exits.")
return
os.mkdir(dirname)
for speaker, model in self.models.items():
filename = os.path.join(dirname, speaker)
model.save(filename)
with open(os.path.join(dirname, 'setting.json'), 'w') as f:
self.setting["threshold"] = self.models[self.unknown].threshold
json.dump(self.setting, f, indent=4) | import param
| random_line_split |
helpers.py | from htm.encoders.rdse import RDSE, RDSE_Parameters
from htm.bindings.sdr import SDR
from collections import defaultdict
from nnmnkwii.preprocessing import trim_zeros_frames
from sklearn.metrics import f1_score, confusion_matrix, classification_report
from attrdict import AttrDict
from datetime import datetime
import os
import json
import random
import pysptk
import soundfile as sf
import torchaudio as ta
import numpy as np
import pyworld as pw
from layers import Layer, Unknown
from viz_util import plot_features
import param
def get_wavfile_list(path):
wav_files = []
for dirpath, subdirs, files in os.walk(path):
for x in files:
if x.endswith(".wav"):
wav_files.append(os.path.join(dirpath, x))
return wav_files
def get_features(x, fs):
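# Extract WORLD vocoder features: fundamental frequency (f0), mel-cepstrum (mcep) and band aperiodicity (bap).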
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def peak_normalize(data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max()
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
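# Sweep the 'unknown' threshold over every anomaly score observed on the training data:
# a clip falls back to the unknown class when all speaker models score above the threshold,
# and the threshold with the best macro-F1 is kept.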
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
|
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = Unknown(threshold)
else:
d[speaker] = self.create_model(speaker)
return d
def get_all_data(self, dataset):
return [data
for speaker in self.sp2idx
for data in dataset[speaker]]
def fit(self, epochs):
print("=====training phase=====")
for speaker in self.sp2idx.keys():
print("=" * 30 + "model of ", speaker, "=" * 30 + "\n")
model = self.models[speaker]
model.train()
train_data = self.train_dataset[speaker]
for epoch in range(epochs):
print("epoch {}".format(epoch))
for data in random.sample(train_data, len(train_data)):
self.experiment.execute(data, model)
fmt = "training data count: {}"
print(fmt.format(len(train_data)), end='\n\n')
all_train_data = self.get_all_data(self.train_dataset)
print("=====threshold optimization phase=====")
self.clf.optimize(all_train_data)
def evaluate(self):
print("=====testing phase=====")
all_test_data = self.get_all_data(self.test_dataset)
f1, cm, report = self.clf.score(all_test_data)
self.score = f1
fmt = "testing data count: {}"
print(fmt.format(len(all_test_data)), end='\n\n')
print("test threshold: ", self.models[self.unknown].threshold)
return f1, cm, report
def save(self):
if self.score < self.save_threshold:
return
dirname = '-'.join([datetime.now().isoformat(), f"{self.score:.2f}"])
if os.path.exists(dirname):
print("model path already exits.")
return
os.mkdir(dirname)
for speaker, model in self.models.items():
filename = os.path.join(dirname, speaker)
model.save(filename)
with open(os.path.join(dirname, 'setting.json'), 'w') as f:
self.setting["threshold"] = self.models[self.unknown].threshold
json.dump(self.setting, f, indent=4) | with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f)) | conditional_block |
kmip.go | /*
Copyright 2022 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kms
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"io"
"strconv"
"time"
kmip "github.com/gemalto/kmip-go"
"github.com/gemalto/kmip-go/kmip14"
"github.com/gemalto/kmip-go/ttlv"
"github.com/google/uuid"
"github.com/pkg/errors"
)
const (
TypeKMIP = "kmip"
// KMIP version.
protocolMajor = 1
protocolMinor = 4
// kmipDefaultReadTimeout is the default read network timeout.
kmipDefaultReadTimeout = uint8(10)
// kmipDefaultWriteTimeout is the default write network timeout.
kmipDefaultWriteTimeout = uint8(10)
// cryptographicLength of the key.
cryptographicLength = 256
//nolint:gosec, value not credential, just configuration keys.
kmipEndpoint = "KMIP_ENDPOINT"
kmipTLSServerName = "TLS_SERVER_NAME"
kmipReadTimeOut = "READ_TIMEOUT"
kmipWriteTimeOut = "WRITE_TIMEOUT"
KmipCACert = "CA_CERT"
KmipClientCert = "CLIENT_CERT"
KmipClientKey = "CLIENT_KEY"
KmipUniqueIdentifier = "UNIQUE_IDENTIFIER"
// EtcKmipDir is kmip config dir.
EtcKmipDir = "/etc/kmip"
)
var (
kmsKMIPMandatoryTokenDetails = []string{KmipCACert, KmipClientCert, KmipClientKey}
kmsKMIPMandatoryConnectionDetails = []string{kmipEndpoint}
ErrKMIPEndpointNotSet = errors.Errorf("%s not set.", kmipEndpoint)
ErrKMIPCACertNotSet = errors.Errorf("%s not set.", KmipCACert)
ErrKMIPClientCertNotSet = errors.Errorf("%s not set.", KmipClientCert)
ErrKMIPClientKeyNotSet = errors.Errorf("%s not set.", KmipClientKey)
)
type kmipKMS struct {
// standard KMIP configuration options
endpoint string
tlsConfig *tls.Config
readTimeout uint8
writeTimeout uint8
}
// InitKMIP initializes the KMIP KMS.
func InitKMIP(config map[string]string) (*kmipKMS, error) {
kms := &kmipKMS{}
kms.endpoint = GetParam(config, kmipEndpoint)
if kms.endpoint == "" {
return nil, ErrKMIPEndpointNotSet
}
// optional
serverName := GetParam(config, kmipTLSServerName)
// optional
kms.readTimeout = kmipDefaultReadTimeout
timeout, err := strconv.Atoi(GetParam(config, kmipReadTimeOut))
if err == nil {
kms.readTimeout = uint8(timeout)
}
// optional
kms.writeTimeout = kmipDefaultWriteTimeout
timeout, err = strconv.Atoi(GetParam(config, kmipWriteTimeOut))
if err == nil {
kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey will create a register key and return its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
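// Register the key with the Export usage mask; getKey later reads the key material back with a Get operation.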
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
err = conn.SetWriteDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) discover(conn io.ReadWriter) error {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil {
return err
}
ttlvPayload, ok := batchItem.ResponsePayload.(ttlv.TTLV)
if !ok {
return errors.New("failed to parse responsePayload")
}
var respDiscoverVersionsPayload kmip.DiscoverVersionsResponsePayload
err = decoder.DecodeValue(&respDiscoverVersionsPayload, ttlvPayload)
if err != nil {
return err
}
if len(respDiscoverVersionsPayload.ProtocolVersion) != 1 {
return fmt.Errorf("invalid len of discovered protocol versions %v expected 1",
len(respDiscoverVersionsPayload.ProtocolVersion))
}
pv := respDiscoverVersionsPayload.ProtocolVersion[0]
if pv.ProtocolVersionMajor != protocolMajor || pv.ProtocolVersionMinor != protocolMinor {
return fmt.Errorf("invalid discovered protocol version %v.%v expected %v.%v",
pv.ProtocolVersionMajor, pv.ProtocolVersionMinor, protocolMajor, protocolMinor)
}
return nil
}
// send sends a KMIP operation over the TLS connection and returns the
// kmip response message,
// ttlv Decoder to decode message into desired format,
// batchItem ID,
// and error.
func (kms *kmipKMS) send(
conn io.ReadWriter,
operation kmip14.Operation,
payload interface{},
) (*kmip.ResponseMessage, *ttlv.Decoder, []byte, error) {
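// A fresh unique batch item ID lets verifyResponse match the reply to this exact request.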
biID := uuid.New()
msg := kmip.RequestMessage{
RequestHeader: kmip.RequestHeader{
ProtocolVersion: kmip.ProtocolVersion{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
BatchCount: 1,
},
BatchItem: []kmip.RequestBatchItem{
{
UniqueBatchItemID: biID[:],
Operation: operation,
RequestPayload: payload,
},
},
}
req, err := ttlv.Marshal(msg)
if err != nil |
_, err = conn.Write(req)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to write request onto connection: %w", err)
}
decoder := ttlv.NewDecoder(bufio.NewReader(conn))
resp, err := decoder.NextTTLV()
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to read ttlv KMIP value: %w", err)
}
var respMsg kmip.ResponseMessage
err = decoder.DecodeValue(&respMsg, resp)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to decode response value: %w", err)
}
return &respMsg, decoder, biID[:], nil
}
// verifyResponse verifies that the response succeeded and returns the batch item.
func (kms *kmipKMS) verifyResponse(
respMsg *kmip.ResponseMessage,
operation kmip14.Operation,
uniqueBatchItemID []byte,
) (*kmip.ResponseBatchItem, error) {
if respMsg.ResponseHeader.BatchCount != 1 {
return nil, fmt.Errorf("batch count %q should be \"1\"",
respMsg.ResponseHeader.BatchCount)
}
if len(respMsg.BatchItem) != 1 {
return nil, fmt.Errorf("batch Intems list len %q should be \"1\"",
len(respMsg.BatchItem))
}
batchItem := respMsg.BatchItem[0]
if operation != batchItem.Operation {
return nil, fmt.Errorf("unexpected operation, real %q expected %q",
batchItem.Operation, operation)
}
if !bytes.Equal(uniqueBatchItemID, batchItem.UniqueBatchItemID) {
return nil, fmt.Errorf("unexpected uniqueBatchItemID, real %q expected %q",
batchItem.UniqueBatchItemID, uniqueBatchItemID)
}
if kmip14.ResultStatusSuccess != batchItem.ResultStatus {
return nil, fmt.Errorf("unexpected result status %q expected success %q,"+
"result reason %q, result message %q",
batchItem.ResultStatus, kmip14.ResultStatusSuccess,
batchItem.ResultReason, batchItem.ResultMessage)
}
return &batchItem, nil
}
| {
return nil, nil, nil,
fmt.Errorf("failed to ttlv marshal message: %w", err)
} | conditional_block |
kmip.go | /*
Copyright 2022 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kms
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"io"
"strconv"
"time"
kmip "github.com/gemalto/kmip-go"
"github.com/gemalto/kmip-go/kmip14"
"github.com/gemalto/kmip-go/ttlv"
"github.com/google/uuid"
"github.com/pkg/errors"
)
const (
TypeKMIP = "kmip"
// KMIP version.
protocolMajor = 1
protocolMinor = 4
// kmipDefaultReadTimeout is the default read network timeout.
kmipDefaultReadTimeout = uint8(10)
// kmipDefaultWriteTimeout is the default write network timeout.
kmipDefaultWriteTimeout = uint8(10)
// cryptographicLength of the key.
cryptographicLength = 256
//nolint:gosec, value not credential, just configuration keys.
kmipEndpoint = "KMIP_ENDPOINT"
kmipTLSServerName = "TLS_SERVER_NAME"
kmipReadTimeOut = "READ_TIMEOUT"
kmipWriteTimeOut = "WRITE_TIMEOUT"
KmipCACert = "CA_CERT"
KmipClientCert = "CLIENT_CERT"
KmipClientKey = "CLIENT_KEY"
KmipUniqueIdentifier = "UNIQUE_IDENTIFIER"
// EtcKmipDir is kmip config dir.
EtcKmipDir = "/etc/kmip"
)
var (
kmsKMIPMandatoryTokenDetails = []string{KmipCACert, KmipClientCert, KmipClientKey}
kmsKMIPMandatoryConnectionDetails = []string{kmipEndpoint}
ErrKMIPEndpointNotSet = errors.Errorf("%s not set.", kmipEndpoint)
ErrKMIPCACertNotSet = errors.Errorf("%s not set.", KmipCACert)
ErrKMIPClientCertNotSet = errors.Errorf("%s not set.", KmipClientCert)
ErrKMIPClientKeyNotSet = errors.Errorf("%s not set.", KmipClientKey)
)
type kmipKMS struct {
// standard KMIP configuration options
endpoint string
tlsConfig *tls.Config
readTimeout uint8
writeTimeout uint8
}
// InitKMIP initializes the KMIP KMS.
func InitKMIP(config map[string]string) (*kmipKMS, error) {
kms := &kmipKMS{}
kms.endpoint = GetParam(config, kmipEndpoint)
if kms.endpoint == "" {
return nil, ErrKMIPEndpointNotSet
}
// optional
serverName := GetParam(config, kmipTLSServerName)
// optional
kms.readTimeout = kmipDefaultReadTimeout
timeout, err := strconv.Atoi(GetParam(config, kmipReadTimeOut))
if err == nil {
kms.readTimeout = uint8(timeout)
}
// optional
kms.writeTimeout = kmipDefaultWriteTimeout
timeout, err = strconv.Atoi(GetParam(config, kmipWriteTimeOut))
if err == nil {
kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey registers the given key with the KMIP server and returns its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
		err = conn.SetWriteDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) discover(conn io.ReadWriter) error {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil {
return err
}
ttlvPayload, ok := batchItem.ResponsePayload.(ttlv.TTLV)
if !ok {
return errors.New("failed to parse responsePayload")
}
var respDiscoverVersionsPayload kmip.DiscoverVersionsResponsePayload
err = decoder.DecodeValue(&respDiscoverVersionsPayload, ttlvPayload)
if err != nil {
return err
}
| if pv.ProtocolVersionMajor != protocolMajor || pv.ProtocolVersionMinor != protocolMinor {
return fmt.Errorf("invalid discovered protocol version %v.%v expected %v.%v",
pv.ProtocolVersionMajor, pv.ProtocolVersionMinor, protocolMajor, protocolMinor)
}
return nil
}
// send sends KMIP operation over tls connection, returns
// kmip response message,
// ttlv Decoder to decode message into desired format,
// batchItem ID,
// and error.
func (kms *kmipKMS) send(
conn io.ReadWriter,
operation kmip14.Operation,
payload interface{},
) (*kmip.ResponseMessage, *ttlv.Decoder, []byte, error) {
biID := uuid.New()
msg := kmip.RequestMessage{
RequestHeader: kmip.RequestHeader{
ProtocolVersion: kmip.ProtocolVersion{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
BatchCount: 1,
},
BatchItem: []kmip.RequestBatchItem{
{
UniqueBatchItemID: biID[:],
Operation: operation,
RequestPayload: payload,
},
},
}
req, err := ttlv.Marshal(msg)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to ttlv marshal message: %w", err)
}
_, err = conn.Write(req)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to write request onto connection: %w", err)
}
decoder := ttlv.NewDecoder(bufio.NewReader(conn))
resp, err := decoder.NextTTLV()
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to read ttlv KMIP value: %w", err)
}
var respMsg kmip.ResponseMessage
err = decoder.DecodeValue(&respMsg, resp)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to decode response value: %w", err)
}
return &respMsg, decoder, biID[:], nil
}
// verifyResponse verifies that the response succeeded and returns the batch item.
func (kms *kmipKMS) verifyResponse(
respMsg *kmip.ResponseMessage,
operation kmip14.Operation,
uniqueBatchItemID []byte,
) (*kmip.ResponseBatchItem, error) {
if respMsg.ResponseHeader.BatchCount != 1 {
return nil, fmt.Errorf("batch count %q should be \"1\"",
respMsg.ResponseHeader.BatchCount)
}
if len(respMsg.BatchItem) != 1 {
return nil, fmt.Errorf("batch Intems list len %q should be \"1\"",
len(respMsg.BatchItem))
}
batchItem := respMsg.BatchItem[0]
if operation != batchItem.Operation {
return nil, fmt.Errorf("unexpected operation, real %q expected %q",
batchItem.Operation, operation)
}
if !bytes.Equal(uniqueBatchItemID, batchItem.UniqueBatchItemID) {
return nil, fmt.Errorf("unexpected uniqueBatchItemID, real %q expected %q",
batchItem.UniqueBatchItemID, uniqueBatchItemID)
}
if kmip14.ResultStatusSuccess != batchItem.ResultStatus {
return nil, fmt.Errorf("unexpected result status %q expected success %q,"+
"result reason %q, result message %q",
batchItem.ResultStatus, kmip14.ResultStatusSuccess,
batchItem.ResultReason, batchItem.ResultMessage)
}
return &batchItem, nil
} | if len(respDiscoverVersionsPayload.ProtocolVersion) != 1 {
return fmt.Errorf("invalid len of discovered protocol versions %v expected 1",
len(respDiscoverVersionsPayload.ProtocolVersion))
}
pv := respDiscoverVersionsPayload.ProtocolVersion[0] | random_line_split |
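A minimal, hypothetical round-trip sketch in Go, placed here between the kmip.go samples for orientation: it chains InitKMIP, registerKey, getKey and deleteKey exactly as the file above defines them. The endpoint and PEM strings are placeholders, the "example-key" name and the function name exampleKMIPRoundTrip are invented for illustration, and because the methods are unexported the sketch would only compile inside the same kms package.

func exampleKMIPRoundTrip() error {
	// Placeholder configuration; a reachable KMIP endpoint (host:port) and
	// real PEM material would be needed in practice.
	config := map[string]string{
		kmipEndpoint:   "kmip.example.com:5696",
		KmipCACert:     "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
		KmipClientCert: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
		KmipClientKey:  "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----",
	}

	k, err := InitKMIP(config)
	if err != nil {
		return err
	}

	// registerKey expects the secret base64-encoded; this literal encodes a
	// 32-byte (256-bit) test value, matching cryptographicLength above.
	secret := "MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWY="

	uid, err := k.registerKey("example-key", secret)
	if err != nil {
		return err
	}

	got, err := k.getKey(uid)
	if err != nil {
		return err
	}
	_ = got // the same base64 string that was registered

	return k.deleteKey(uid)
}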
kmip.go | /*
Copyright 2022 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kms
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"io"
"strconv"
"time"
kmip "github.com/gemalto/kmip-go"
"github.com/gemalto/kmip-go/kmip14"
"github.com/gemalto/kmip-go/ttlv"
"github.com/google/uuid"
"github.com/pkg/errors"
)
const (
TypeKMIP = "kmip"
// KMIP version.
protocolMajor = 1
protocolMinor = 4
// kmipDefaultReadTimeout is the default read network timeout.
kmipDefaultReadTimeout = uint8(10)
// kmipDefaultWriteTimeout is the default write network timeout.
kmipDefaultWriteTimeout = uint8(10)
// cryptographicLength of the key.
cryptographicLength = 256
//nolint:gosec, value not credential, just configuration keys.
kmipEndpoint = "KMIP_ENDPOINT"
kmipTLSServerName = "TLS_SERVER_NAME"
kmipReadTimeOut = "READ_TIMEOUT"
kmipWriteTimeOut = "WRITE_TIMEOUT"
KmipCACert = "CA_CERT"
KmipClientCert = "CLIENT_CERT"
KmipClientKey = "CLIENT_KEY"
KmipUniqueIdentifier = "UNIQUE_IDENTIFIER"
// EtcKmipDir is kmip config dir.
EtcKmipDir = "/etc/kmip"
)
var (
kmsKMIPMandatoryTokenDetails = []string{KmipCACert, KmipClientCert, KmipClientKey}
kmsKMIPMandatoryConnectionDetails = []string{kmipEndpoint}
ErrKMIPEndpointNotSet = errors.Errorf("%s not set.", kmipEndpoint)
ErrKMIPCACertNotSet = errors.Errorf("%s not set.", KmipCACert)
ErrKMIPClientCertNotSet = errors.Errorf("%s not set.", KmipClientCert)
ErrKMIPClientKeyNotSet = errors.Errorf("%s not set.", KmipClientKey)
)
type kmipKMS struct {
// standard KMIP configuration options
endpoint string
tlsConfig *tls.Config
readTimeout uint8
writeTimeout uint8
}
// InitKMIP initializes the KMIP KMS.
func InitKMIP(config map[string]string) (*kmipKMS, error) {
kms := &kmipKMS{}
kms.endpoint = GetParam(config, kmipEndpoint)
if kms.endpoint == "" {
return nil, ErrKMIPEndpointNotSet
}
// optional
serverName := GetParam(config, kmipTLSServerName)
// optional
kms.readTimeout = kmipDefaultReadTimeout
timeout, err := strconv.Atoi(GetParam(config, kmipReadTimeOut))
if err == nil {
kms.readTimeout = uint8(timeout)
}
// optional
kms.writeTimeout = kmipDefaultWriteTimeout
timeout, err = strconv.Atoi(GetParam(config, kmipWriteTimeOut))
if err == nil {
kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey registers the given key with the KMIP server and returns its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
		err = conn.SetWriteDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) discover(conn io.ReadWriter) error |
// send sends KMIP operation over tls connection, returns
// kmip response message,
// ttlv Decoder to decode message into desired format,
// batchItem ID,
// and error.
func (kms *kmipKMS) send(
conn io.ReadWriter,
operation kmip14.Operation,
payload interface{},
) (*kmip.ResponseMessage, *ttlv.Decoder, []byte, error) {
biID := uuid.New()
msg := kmip.RequestMessage{
RequestHeader: kmip.RequestHeader{
ProtocolVersion: kmip.ProtocolVersion{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
BatchCount: 1,
},
BatchItem: []kmip.RequestBatchItem{
{
UniqueBatchItemID: biID[:],
Operation: operation,
RequestPayload: payload,
},
},
}
req, err := ttlv.Marshal(msg)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to ttlv marshal message: %w", err)
}
_, err = conn.Write(req)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to write request onto connection: %w", err)
}
decoder := ttlv.NewDecoder(bufio.NewReader(conn))
resp, err := decoder.NextTTLV()
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to read ttlv KMIP value: %w", err)
}
var respMsg kmip.ResponseMessage
err = decoder.DecodeValue(&respMsg, resp)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to decode response value: %w", err)
}
return &respMsg, decoder, biID[:], nil
}
// verifyResponse verifies that the response succeeded and returns the batch item.
func (kms *kmipKMS) verifyResponse(
respMsg *kmip.ResponseMessage,
operation kmip14.Operation,
uniqueBatchItemID []byte,
) (*kmip.ResponseBatchItem, error) {
if respMsg.ResponseHeader.BatchCount != 1 {
return nil, fmt.Errorf("batch count %q should be \"1\"",
respMsg.ResponseHeader.BatchCount)
}
if len(respMsg.BatchItem) != 1 {
return nil, fmt.Errorf("batch Intems list len %q should be \"1\"",
len(respMsg.BatchItem))
}
batchItem := respMsg.BatchItem[0]
if operation != batchItem.Operation {
return nil, fmt.Errorf("unexpected operation, real %q expected %q",
batchItem.Operation, operation)
}
if !bytes.Equal(uniqueBatchItemID, batchItem.UniqueBatchItemID) {
return nil, fmt.Errorf("unexpected uniqueBatchItemID, real %q expected %q",
batchItem.UniqueBatchItemID, uniqueBatchItemID)
}
if kmip14.ResultStatusSuccess != batchItem.ResultStatus {
return nil, fmt.Errorf("unexpected result status %q expected success %q,"+
"result reason %q, result message %q",
batchItem.ResultStatus, kmip14.ResultStatusSuccess,
batchItem.ResultReason, batchItem.ResultMessage)
}
return &batchItem, nil
}
| {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil {
return err
}
ttlvPayload, ok := batchItem.ResponsePayload.(ttlv.TTLV)
if !ok {
return errors.New("failed to parse responsePayload")
}
var respDiscoverVersionsPayload kmip.DiscoverVersionsResponsePayload
err = decoder.DecodeValue(&respDiscoverVersionsPayload, ttlvPayload)
if err != nil {
return err
}
if len(respDiscoverVersionsPayload.ProtocolVersion) != 1 {
return fmt.Errorf("invalid len of discovered protocol versions %v expected 1",
len(respDiscoverVersionsPayload.ProtocolVersion))
}
pv := respDiscoverVersionsPayload.ProtocolVersion[0]
if pv.ProtocolVersionMajor != protocolMajor || pv.ProtocolVersionMinor != protocolMinor {
return fmt.Errorf("invalid discovered protocol version %v.%v expected %v.%v",
pv.ProtocolVersionMajor, pv.ProtocolVersionMinor, protocolMajor, protocolMinor)
}
return nil
} | identifier_body |
kmip.go | /*
Copyright 2022 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kms
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"io"
"strconv"
"time"
kmip "github.com/gemalto/kmip-go"
"github.com/gemalto/kmip-go/kmip14"
"github.com/gemalto/kmip-go/ttlv"
"github.com/google/uuid"
"github.com/pkg/errors"
)
const (
TypeKMIP = "kmip"
// KMIP version.
protocolMajor = 1
protocolMinor = 4
// kmipDefaultReadTimeout is the default read network timeout.
kmipDefaultReadTimeout = uint8(10)
// kmipDefaultWriteTimeout is the default write network timeout.
kmipDefaultWriteTimeout = uint8(10)
// cryptographicLength of the key.
cryptographicLength = 256
//nolint:gosec, value not credential, just configuration keys.
kmipEndpoint = "KMIP_ENDPOINT"
kmipTLSServerName = "TLS_SERVER_NAME"
kmipReadTimeOut = "READ_TIMEOUT"
kmipWriteTimeOut = "WRITE_TIMEOUT"
KmipCACert = "CA_CERT"
KmipClientCert = "CLIENT_CERT"
KmipClientKey = "CLIENT_KEY"
KmipUniqueIdentifier = "UNIQUE_IDENTIFIER"
// EtcKmipDir is kmip config dir.
EtcKmipDir = "/etc/kmip"
)
var (
kmsKMIPMandatoryTokenDetails = []string{KmipCACert, KmipClientCert, KmipClientKey}
kmsKMIPMandatoryConnectionDetails = []string{kmipEndpoint}
ErrKMIPEndpointNotSet = errors.Errorf("%s not set.", kmipEndpoint)
ErrKMIPCACertNotSet = errors.Errorf("%s not set.", KmipCACert)
ErrKMIPClientCertNotSet = errors.Errorf("%s not set.", KmipClientCert)
ErrKMIPClientKeyNotSet = errors.Errorf("%s not set.", KmipClientKey)
)
type kmipKMS struct {
// standard KMIP configuration options
endpoint string
tlsConfig *tls.Config
readTimeout uint8
writeTimeout uint8
}
// InitKMIP initializes the KMIP KMS.
func InitKMIP(config map[string]string) (*kmipKMS, error) {
kms := &kmipKMS{}
kms.endpoint = GetParam(config, kmipEndpoint)
if kms.endpoint == "" {
return nil, ErrKMIPEndpointNotSet
}
// optional
serverName := GetParam(config, kmipTLSServerName)
// optional
kms.readTimeout = kmipDefaultReadTimeout
timeout, err := strconv.Atoi(GetParam(config, kmipReadTimeOut))
if err == nil {
kms.readTimeout = uint8(timeout)
}
// optional
kms.writeTimeout = kmipDefaultWriteTimeout
timeout, err = strconv.Atoi(GetParam(config, kmipWriteTimeOut))
if err == nil {
kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey registers the given key with the KMIP server and returns its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
		err = conn.SetWriteDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) | (conn io.ReadWriter) error {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil {
return err
}
ttlvPayload, ok := batchItem.ResponsePayload.(ttlv.TTLV)
if !ok {
return errors.New("failed to parse responsePayload")
}
var respDiscoverVersionsPayload kmip.DiscoverVersionsResponsePayload
err = decoder.DecodeValue(&respDiscoverVersionsPayload, ttlvPayload)
if err != nil {
return err
}
if len(respDiscoverVersionsPayload.ProtocolVersion) != 1 {
return fmt.Errorf("invalid len of discovered protocol versions %v expected 1",
len(respDiscoverVersionsPayload.ProtocolVersion))
}
pv := respDiscoverVersionsPayload.ProtocolVersion[0]
if pv.ProtocolVersionMajor != protocolMajor || pv.ProtocolVersionMinor != protocolMinor {
return fmt.Errorf("invalid discovered protocol version %v.%v expected %v.%v",
pv.ProtocolVersionMajor, pv.ProtocolVersionMinor, protocolMajor, protocolMinor)
}
return nil
}
// send sends KMIP operation over tls connection, returns
// kmip response message,
// ttlv Decoder to decode message into desired format,
// batchItem ID,
// and error.
func (kms *kmipKMS) send(
conn io.ReadWriter,
operation kmip14.Operation,
payload interface{},
) (*kmip.ResponseMessage, *ttlv.Decoder, []byte, error) {
biID := uuid.New()
msg := kmip.RequestMessage{
RequestHeader: kmip.RequestHeader{
ProtocolVersion: kmip.ProtocolVersion{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
BatchCount: 1,
},
BatchItem: []kmip.RequestBatchItem{
{
UniqueBatchItemID: biID[:],
Operation: operation,
RequestPayload: payload,
},
},
}
req, err := ttlv.Marshal(msg)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to ttlv marshal message: %w", err)
}
_, err = conn.Write(req)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to write request onto connection: %w", err)
}
decoder := ttlv.NewDecoder(bufio.NewReader(conn))
resp, err := decoder.NextTTLV()
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to read ttlv KMIP value: %w", err)
}
var respMsg kmip.ResponseMessage
err = decoder.DecodeValue(&respMsg, resp)
if err != nil {
return nil, nil, nil,
fmt.Errorf("failed to decode response value: %w", err)
}
return &respMsg, decoder, biID[:], nil
}
// verifyResponse verifies that the response succeeded and returns the batch item.
func (kms *kmipKMS) verifyResponse(
respMsg *kmip.ResponseMessage,
operation kmip14.Operation,
uniqueBatchItemID []byte,
) (*kmip.ResponseBatchItem, error) {
if respMsg.ResponseHeader.BatchCount != 1 {
return nil, fmt.Errorf("batch count %q should be \"1\"",
respMsg.ResponseHeader.BatchCount)
}
if len(respMsg.BatchItem) != 1 {
return nil, fmt.Errorf("batch Intems list len %q should be \"1\"",
len(respMsg.BatchItem))
}
batchItem := respMsg.BatchItem[0]
if operation != batchItem.Operation {
return nil, fmt.Errorf("unexpected operation, real %q expected %q",
batchItem.Operation, operation)
}
if !bytes.Equal(uniqueBatchItemID, batchItem.UniqueBatchItemID) {
return nil, fmt.Errorf("unexpected uniqueBatchItemID, real %q expected %q",
batchItem.UniqueBatchItemID, uniqueBatchItemID)
}
if kmip14.ResultStatusSuccess != batchItem.ResultStatus {
return nil, fmt.Errorf("unexpected result status %q expected success %q,"+
"result reason %q, result message %q",
batchItem.ResultStatus, kmip14.ResultStatusSuccess,
batchItem.ResultReason, batchItem.ResultMessage)
}
return &batchItem, nil
}
| discover | identifier_name |
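One more hedged Go sketch before the views.py samples, exercising verifyResponse's guard clauses with a hand-built response. The composite-literal field names for kmip.ResponseMessage, kmip.ResponseHeader and kmip.ResponseBatchItem are assumed from how the functions above read those fields, not checked against the gemalto/kmip-go API, and exampleVerifyResponse is an invented helper; treat it as illustrative only.

func exampleVerifyResponse(kms *kmipKMS) {
	biID := []byte("batch-item-1") // arbitrary batch item ID for the sketch

	respMsg := &kmip.ResponseMessage{
		ResponseHeader: kmip.ResponseHeader{BatchCount: 1},
		BatchItem: []kmip.ResponseBatchItem{
			{
				Operation:         kmip14.OperationGet,
				UniqueBatchItemID: biID,
				ResultStatus:      kmip14.ResultStatusSuccess,
			},
		},
	}

	// Matching operation, ID and status: verifyResponse returns the batch item.
	if _, err := kms.verifyResponse(respMsg, kmip14.OperationGet, biID); err != nil {
		fmt.Println("unexpected failure:", err)
	}

	// Asking for a different operation trips the operation check.
	if _, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, biID); err != nil {
		fmt.Println("expected mismatch:", err)
	}
}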
views.py | # -*- coding: utf-8 -*-
from time import strptime
from datetime import datetime, date, time
from django.conf import settings
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from contract.models import *
from person.models import *
from employees.models import Employee, Visits as eVisits
from finance.models import *
from finance.forms import *
from .models import *
from .forms import *
day_name = "понедельник вторник среда четверг пятница суббота воскресенье"
day_name = day_name.split()
abc = ("А","Б","В","Г","Д","Е","Ё","Ж","З","И","К",
"Л","М","Н","О","П","Р","С","Т","У","Ф","Х",
"Ц","Ч","Ш","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
        o_name = 'Гость'
        b_url = reverse('r_guest')
        context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
b_url = reverse('r_guest_card', args=(guest.pk, ))
if request.method == 'POST':
post_val = request.POST.copy()
post_val['date'] = datetime.now()
f = FormInvitation(post_val)
if f.is_valid():
f.save()
return HttpResponseRedirect(b_url)
else:
return HttpResponse(f.errors)
context_dict = dict(request=request, g=guest, b_url=b_url)
context_dict.update(csrf(request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET.keys():
query = request.GET.get('query')
if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = | tle, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = request.POST.get('lastname')
if isinstance(find, long):
res = Contract.objects.filter(card=find, is_current=1)
            # if not found among current contracts, try prospect contracts
if res.count() < 1:
res = Contract.objects.filter(card=find, is_current=2)
employees = Employee.objects.filter(card=find,)
else:
ac = Contract.objects.filter(is_current__in=[1, 2]).values('client')
res = Client.objects.filter(last_name__icontains=find, pk__in=ac)
employees = Employee.objects.filter(lastname__icontains=find)
if res.count() + employees.count() == 1:
if employees:
url = reverse('e_comein', args=(employees[0].pk, ))
else:
try: # if contract
url = reverse('person_card',args=[res[0].client.pk])
except AttributeError:
url = reverse('person_card',args=[res[0].pk])
return HttpResponseRedirect(url)
else:
lst = res
context_dict = dict(request=request, lst=lst, employees=employees)
context_dict.update(csrf(request))
return render_to_response("client_login.html", context_dict, )
@login_required(login_url='/login/')
def clients_online(request,):
lst = []
for v in Visits.objects.filter(is_online=-1).order_by('date_start'):
debts = Credits.objects.filter(client=v.contract.client).count()
lst.append((debts,v))
glst = []
for gv in GuestVisits.objects.filter(is_online=-1).order_by('date_start'):
debts = Credits.objects.filter(guest=gv.guest).count()
glst.append((debts, gv))
elst = eVisits.objects.filter(date_end__isnull=True).order_by('date_start')
context_dict = dict(request=request, lst = lst, glst=glst, elst=elst)
return render_to_response("online.html", context_dict, )
@login_required(login_url='/login/')
def reception_menu(request,):
Y = datetime.today().year
m = datetime.today().strftime("%m")
d = datetime.today().strftime("%d")
context_dict = dict(request=request, Y=Y, m=m, d=d, )
return render_to_response("reception_menu.html", context_dict, )
| f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_ti | conditional_block |
views.py | # -*- coding: utf-8 -*-
from time import strptime
from datetime import datetime, date, time
from django.conf import settings
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from contract.models import *
from person.models import *
from employees.models import Employee, Visits as eVisits
from finance.models import *
from finance.forms import *
from .models import *
from .forms import *
day_name = "понедельник вторник среда четверг пятница суббота воскресенье"
day_name = day_name.split()
abc = ("А","Б","В","Г","Д","Е","Ё","Ж","З","И","К",
"Л","М","Н","О","П","Р","С","Т","У","Ф","Х",
"Ц","Ч","Ш","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
        o_name = 'Гость'
        b_url = reverse('r_guest')
        context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
b_url = reverse('r_guest_card', args=(guest.pk, ))
if request.method == 'POST':
post_val = request.POST.copy()
post_val['date'] = datetime.now()
f = FormInvitation(post_val)
if f.is_valid():
f.save()
return HttpResponseRedirect(b_url)
else:
return HttpResponse(f.errors)
context_dict = dict(request=request, g=guest, b_url=b_url)
context_dict.update(csrf(request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET.keys():
query = request.GET.get('query')
if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_title, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = r | _start'):
debts = Credits.objects.filter(client=v.contract.client).count()
lst.append((debts,v))
glst = []
for gv in GuestVisits.objects.filter(is_online=-1).order_by('date_start'):
debts = Credits.objects.filter(guest=gv.guest).count()
glst.append((debts, gv))
elst = eVisits.objects.filter(date_end__isnull=True).order_by('date_start')
context_dict = dict(request=request, lst = lst, glst=glst, elst=elst)
return render_to_response("online.html", context_dict, )
@login_required(login_url='/login/')
def reception_menu(request,):
Y = datetime.today().year
m = datetime.today().strftime("%m")
d = datetime.today().strftime("%d")
context_dict = dict(request=request, Y=Y, m=m, d=d, )
return render_to_response("reception_menu.html", context_dict, )
| equest.POST.get('lastname')
if isinstance(find, long):
res = Contract.objects.filter(card=find, is_current=1)
            # if not found among current contracts, try prospect contracts
if res.count() < 1:
res = Contract.objects.filter(card=find, is_current=2)
employees = Employee.objects.filter(card=find,)
else:
ac = Contract.objects.filter(is_current__in=[1, 2]).values('client')
res = Client.objects.filter(last_name__icontains=find, pk__in=ac)
employees = Employee.objects.filter(lastname__icontains=find)
if res.count() + employees.count() == 1:
if employees:
url = reverse('e_comein', args=(employees[0].pk, ))
else:
try: # if contract
url = reverse('person_card',args=[res[0].client.pk])
except AttributeError:
url = reverse('person_card',args=[res[0].pk])
return HttpResponseRedirect(url)
else:
lst = res
context_dict = dict(request=request, lst=lst, employees=employees)
context_dict.update(csrf(request))
return render_to_response("client_login.html", context_dict, )
@login_required(login_url='/login/')
def clients_online(request,):
lst = []
for v in Visits.objects.filter(is_online=-1).order_by('date | identifier_body |
views.py | # -*- coding: utf-8 -*-
from time import strptime
from datetime import datetime, date, time
from django.conf import settings
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from contract.models import *
from person.models import *
from employees.models import Employee, Visits as eVisits
from finance.models import *
from finance.forms import *
from .models import *
from .forms import *
day_name = "понедельник вторник среда четверг пятница суббота воскресенье"
day_name = day_name.split()
abc = ("А","Б","В","Г","Д","Е","Ё","Ж","З","И","К",
"Л","М","Н","О","П","Р","С","Т","У","Ф","Х",
"Ц","Ч","Ш","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
        o_name = 'Гость'
        b_url = reverse('r_guest')
        context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
b_url = reverse('r_guest_card', args=(guest.pk, ))
if request.method == 'POST':
post_val = request.POST.copy()
post_val['date'] = datetime.now()
f = FormInvitation(post_val)
if f.is_valid():
f.save()
return HttpResponseRedirect(b_url)
else:
return HttpResponse(f.errors)
context_dict = dict(request=request, g=guest, b_url=b_url)
context_dict.update(csrf(request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET. | ery = request.GET.get('query')
if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_title, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
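            # group1 selects the repeat mode: 1 = every day, 2 = a single date,
            # 3 = selected weekdays (collected below into a comma-terminated string
            # such as "0,2,4,").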
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
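    # The login box accepts either a card number or a last-name fragment: numeric input
    # is matched against contract and employee card numbers, anything else is treated as
    # a last-name search over clients with active contracts and over employees.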
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = request.POST.get('lastname')
if isinstance(find, long):
res = Contract.objects.filter(card=find, is_current=1)
            # if not found among current contracts, try prospective ones
if res.count() < 1:
res = Contract.objects.filter(card=find, is_current=2)
employees = Employee.objects.filter(card=find,)
else:
ac = Contract.objects.filter(is_current__in=[1, 2]).values('client')
res = Client.objects.filter(last_name__icontains=find, pk__in=ac)
employees = Employee.objects.filter(lastname__icontains=find)
if res.count() + employees.count() == 1:
if employees:
url = reverse('e_comein', args=(employees[0].pk, ))
else:
try: # if contract
url = reverse('person_card',args=[res[0].client.pk])
except AttributeError:
url = reverse('person_card',args=[res[0].pk])
return HttpResponseRedirect(url)
else:
lst = res
context_dict = dict(request=request, lst=lst, employees=employees)
context_dict.update(csrf(request))
return render_to_response("client_login.html", context_dict, )
@login_required(login_url='/login/')
def clients_online(request,):
lst = []
for v in Visits.objects.filter(is_online=-1).order_by('date_start'):
debts = Credits.objects.filter(client=v.contract.client).count()
lst.append((debts,v))
glst = []
for gv in GuestVisits.objects.filter(is_online=-1).order_by('date_start'):
debts = Credits.objects.filter(guest=gv.guest).count()
glst.append((debts, gv))
elst = eVisits.objects.filter(date_end__isnull=True).order_by('date_start')
context_dict = dict(request=request, lst = lst, glst=glst, elst=elst)
return render_to_response("online.html", context_dict, )
@login_required(login_url='/login/')
def reception_menu(request,):
Y = datetime.today().year
m = datetime.today().strftime("%m")
d = datetime.today().strftime("%d")
context_dict = dict(request=request, Y=Y, m=m, d=d, )
return render_to_response("reception_menu.html", context_dict, )
| keys():
qu | identifier_name |
views.py | # -*- coding: utf-8 -*-
from time import strptime
from datetime import datetime, date, time
from django.conf import settings
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from contract.models import *
from person.models import *
from employees.models import Employee, Visits as eVisits
from finance.models import *
from finance.forms import *
from .models import *
from .forms import *
day_name = "понедельник вторник среда четверг пятница суббота воскресенье"
day_name = day_name.split()
abc = ("А","Б","В","Г","Д","Е","Ё","Ж","З","И","К",
"Л","М","Н","О","П","Р","С","Т","У","Ф","Х",
"Ц","Ч","Ш","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
        o_name = 'Гость'
        b_url = reverse('r_guest')
        context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
b_url = reverse('r_guest_card', args=(guest.pk, ))
if request.method == 'POST':
post_val = request.POST.copy()
post_val['date'] = datetime.now()
f = FormInvitation(post_val)
if f.is_valid():
f.save()
return HttpResponseRedirect(b_url)
else:
return HttpResponse(f.errors)
context_dict = dict(request=request, g=guest, b_url=b_url)
context_dict.update(csrf(request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET.keys(): | if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_title, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = request.POST.get('lastname')
if isinstance(find, long):
res = Contract.objects.filter(card=find, is_current=1)
            # if not found among current contracts, try prospective ones
if res.count() < 1:
res = Contract.objects.filter(card=find, is_current=2)
employees = Employee.objects.filter(card=find,)
else:
ac = Contract.objects.filter(is_current__in=[1, 2]).values('client')
res = Client.objects.filter(last_name__icontains=find, pk__in=ac)
employees = Employee.objects.filter(lastname__icontains=find)
if res.count() + employees.count() == 1:
if employees:
url = reverse('e_comein', args=(employees[0].pk, ))
else:
try: # if contract
url = reverse('person_card',args=[res[0].client.pk])
except AttributeError:
url = reverse('person_card',args=[res[0].pk])
return HttpResponseRedirect(url)
else:
lst = res
context_dict = dict(request=request, lst=lst, employees=employees)
context_dict.update(csrf(request))
return render_to_response("client_login.html", context_dict, )
@login_required(login_url='/login/')
def clients_online(request,):
lst = []
for v in Visits.objects.filter(is_online=-1).order_by('date_start'):
debts = Credits.objects.filter(client=v.contract.client).count()
lst.append((debts,v))
glst = []
for gv in GuestVisits.objects.filter(is_online=-1).order_by('date_start'):
debts = Credits.objects.filter(guest=gv.guest).count()
glst.append((debts, gv))
elst = eVisits.objects.filter(date_end__isnull=True).order_by('date_start')
context_dict = dict(request=request, lst = lst, glst=glst, elst=elst)
return render_to_response("online.html", context_dict, )
@login_required(login_url='/login/')
def reception_menu(request,):
Y = datetime.today().year
m = datetime.today().strftime("%m")
d = datetime.today().strftime("%d")
context_dict = dict(request=request, Y=Y, m=m, d=d, )
return render_to_response("reception_menu.html", context_dict, ) | query = request.GET.get('query') | random_line_split |
warming.rs | use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, Mutex, Weak};
use std::thread::JoinHandle;
use std::time::Duration;
use crate::{Executor, Inventory, Searcher, SearcherGeneration, TantivyError};
pub const GC_INTERVAL: Duration = Duration::from_secs(1);
/// `Warmer` can be used to maintain segment-level state e.g. caches.
///
/// They must be registered with the [super::IndexReaderBuilder].
pub trait Warmer: Sync + Send {
/// Perform any warming work using the provided [Searcher].
fn warm(&self, searcher: &Searcher) -> crate::Result<()>;
/// Discards internal state for any [SearcherGeneration] not provided.
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]);
}
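// Illustrative sketch (not part of the original source): a minimal `Warmer` that
// caches the document count per searcher generation. The type name and field layout
// here are assumptions made purely for this example.
struct DocCountWarmer {
    doc_counts: Mutex<std::collections::HashMap<u64, u64>>,
}

impl Warmer for DocCountWarmer {
    fn warm(&self, searcher: &Searcher) -> crate::Result<()> {
        // Record the document count for this generation so later lookups can reuse it.
        self.doc_counts
            .lock()
            .unwrap()
            .insert(searcher.generation().generation_id(), searcher.num_docs());
        Ok(())
    }

    fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
        // Drop cached entries for generations that no longer have a live searcher.
        let live: HashSet<u64> = live_generations
            .iter()
            .map(|generation| generation.generation_id())
            .collect();
        self.doc_counts
            .lock()
            .unwrap()
            .retain(|generation_id, _| live.contains(generation_id));
    }
}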
/// Warming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory, |
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
    // This list is used to avoid triggering the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn warm_new_searcher_generation(
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 {
Ok(Executor::single_thread())
} else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.num_warming_threads(num_warming_threads)
.num_searchers(num_searchers)
.warmers(vec![
Arc::downgrade(&warmer1) as Weak<dyn Warmer>,
Arc::downgrade(&warmer2) as Weak<dyn Warmer>,
])
.try_into()?;
let warming_state = &reader.inner.warming_state;
let searcher = reader.searcher();
assert!(
!warming_state.gc_maybe(),
"no GC after first searcher generation"
);
warmer1.verify(1, 0, segment_ids(&searcher));
warmer2.verify(1, 0, segment_ids(&searcher));
assert_eq!(searcher.num_docs(), 1000);
for i in 1000u64..2000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
writer.wait_merging_threads()?;
drop(warmer1);
let old_searcher = searcher;
reader.reload()?;
assert!(!warming_state.gc_maybe(), "old searcher still around");
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2000);
warmer2.verify(
2,
0,
segment_ids(&old_searcher)
.union(&segment_ids(&searcher))
.copied()
.collect(),
);
drop(old_searcher);
for _ in 0..num_searchers {
// make sure the old searcher is dropped by the pool too
let _ = reader.searcher();
}
assert!(warming_state.gc_maybe(), "old searcher dropped");
warmer2.verify(2, 1, segment_ids(&searcher));
Ok(())
}
#[test]
fn warming_single_thread() -> crate::Result<()> {
test_warming(1)
}
#[test]
fn warming_four_threads() -> crate::Result<()> {
test_warming(4)
}
} | }))))
} | random_line_split |
warming.rs | use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, Mutex, Weak};
use std::thread::JoinHandle;
use std::time::Duration;
use crate::{Executor, Inventory, Searcher, SearcherGeneration, TantivyError};
pub const GC_INTERVAL: Duration = Duration::from_secs(1);
/// `Warmer` can be used to maintain segment-level state e.g. caches.
///
/// They must be registered with the [super::IndexReaderBuilder].
pub trait Warmer: Sync + Send {
/// Perform any warming work using the provided [Searcher].
fn warm(&self, searcher: &Searcher) -> crate::Result<()>;
/// Discards internal state for any [SearcherGeneration] not provided.
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]);
}
/// Warming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory,
}))))
}
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
    // This list is used to avoid triggering the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn | (
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 {
Ok(Executor::single_thread())
} else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.num_warming_threads(num_warming_threads)
.num_searchers(num_searchers)
.warmers(vec![
Arc::downgrade(&warmer1) as Weak<dyn Warmer>,
Arc::downgrade(&warmer2) as Weak<dyn Warmer>,
])
.try_into()?;
let warming_state = &reader.inner.warming_state;
let searcher = reader.searcher();
assert!(
!warming_state.gc_maybe(),
"no GC after first searcher generation"
);
warmer1.verify(1, 0, segment_ids(&searcher));
warmer2.verify(1, 0, segment_ids(&searcher));
assert_eq!(searcher.num_docs(), 1000);
for i in 1000u64..2000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
writer.wait_merging_threads()?;
drop(warmer1);
let old_searcher = searcher;
reader.reload()?;
assert!(!warming_state.gc_maybe(), "old searcher still around");
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2000);
warmer2.verify(
2,
0,
segment_ids(&old_searcher)
.union(&segment_ids(&searcher))
.copied()
.collect(),
);
drop(old_searcher);
for _ in 0..num_searchers {
// make sure the old searcher is dropped by the pool too
let _ = reader.searcher();
}
assert!(warming_state.gc_maybe(), "old searcher dropped");
warmer2.verify(2, 1, segment_ids(&searcher));
Ok(())
}
#[test]
fn warming_single_thread() -> crate::Result<()> {
test_warming(1)
}
#[test]
fn warming_four_threads() -> crate::Result<()> {
test_warming(4)
}
}
| warm_new_searcher_generation | identifier_name |
warming.rs | use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, Mutex, Weak};
use std::thread::JoinHandle;
use std::time::Duration;
use crate::{Executor, Inventory, Searcher, SearcherGeneration, TantivyError};
pub const GC_INTERVAL: Duration = Duration::from_secs(1);
/// `Warmer` can be used to maintain segment-level state e.g. caches.
///
/// They must be registered with the [super::IndexReaderBuilder].
pub trait Warmer: Sync + Send {
/// Perform any warming work using the provided [Searcher].
fn warm(&self, searcher: &Searcher) -> crate::Result<()>;
/// Discards internal state for any [SearcherGeneration] not provided.
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]);
}
/// Warming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory,
}))))
}
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
    // This list is used to avoid triggering the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn warm_new_searcher_generation(
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 | else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.num_warming_threads(num_warming_threads)
.num_searchers(num_searchers)
.warmers(vec![
Arc::downgrade(&warmer1) as Weak<dyn Warmer>,
Arc::downgrade(&warmer2) as Weak<dyn Warmer>,
])
.try_into()?;
let warming_state = &reader.inner.warming_state;
let searcher = reader.searcher();
assert!(
!warming_state.gc_maybe(),
"no GC after first searcher generation"
);
warmer1.verify(1, 0, segment_ids(&searcher));
warmer2.verify(1, 0, segment_ids(&searcher));
assert_eq!(searcher.num_docs(), 1000);
for i in 1000u64..2000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
writer.wait_merging_threads()?;
drop(warmer1);
let old_searcher = searcher;
reader.reload()?;
assert!(!warming_state.gc_maybe(), "old searcher still around");
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2000);
warmer2.verify(
2,
0,
segment_ids(&old_searcher)
.union(&segment_ids(&searcher))
.copied()
.collect(),
);
drop(old_searcher);
for _ in 0..num_searchers {
// make sure the old searcher is dropped by the pool too
let _ = reader.searcher();
}
assert!(warming_state.gc_maybe(), "old searcher dropped");
warmer2.verify(2, 1, segment_ids(&searcher));
Ok(())
}
#[test]
fn warming_single_thread() -> crate::Result<()> {
test_warming(1)
}
#[test]
fn warming_four_threads() -> crate::Result<()> {
test_warming(4)
}
}
| {
Ok(Executor::single_thread())
} | conditional_block |
main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Core RAMP-UA model.
Created on Wed Apr 29 19:59:25 2020
@author: nick
"""
import sys
import os
os.environ['R_HOME'] = 'C:/Users/gy17m2a/AppData/Local/Programs/R/R-4.2.0' #path to your R installation
os.environ['R_USER'] = 'C:/ProgramData/Anaconda3/envs/analyse_results/Lib/site-packages/rpy2' #path depends on where you installed Python. Mine is the Anaconda distribution
sys.path.append("microsim") # This is only needed when testing. I'm so confused about the imports
sys.path.append("C:/users/gy17m2a/OneDrive - University of Leeds/Project/RAMP-UA-new/")
print(os.getcwd())
print(sys.path)
import multiprocessing
import pandas as pd
pd.set_option('display.expand_frame_repr', False) # Don't wrap lines when displaying DataFrames
# pd.set_option('display.width', 0) # Automatically find the best width
import os
import click # command-line interface
import pickle # to save data
from yaml import load, SafeLoader # pyyaml library for reading the parameters.yml file
from shutil import copyfile
from microsim.quant_api import QuantRampAPI
from microsim.population_initialisation import PopulationInitialisation
from microsim.microsim_model import Microsim
from microsim.opencl.ramp.run import run_opencl
from microsim.opencl.ramp.snapshot_convertor import SnapshotConvertor
from microsim.opencl.ramp.snapshot import Snapshot
from microsim.opencl.ramp.params import Params, IndividualHazardMultipliers, LocationHazardMultipliers
from microsim.initialisation_cache import InitialisationCache
from microsim.utilities import data_setup, unpack_data
# ********
# PROGRAM ENTRY POINT
# Uses 'click' library so that it can be run from the command line
# ********
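# Example invocations (illustrative only; paths and flag values are not prescriptive):
#   python main.py -p ./model_parameters/default.yml
#   python main.py --no-parameters-file -i 100 -r 5 --data-dir devon_data
#   python main.py -ocl -gui    # run the OpenCL model with the GUI visualisation
#   python main.py -init        # only build caches/snapshots, then exit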
@click.command()
@click.option('-p', '--parameters_file', default="./model_parameters/default.yml", type=click.Path(exists=True),
help="Parameters file to use to configure the model. Default: ./model_parameters/default.yml")
@click.option('-npf', '--no-parameters-file', is_flag=True,
help="Don't read a parameters file, use command line arguments instead")
@click.option('-init', '--initialise', is_flag=True,
help="Just initialise the model and create caches and snapshots. Dont' run it.")
@click.option('-i', '--iterations', default=10, help='Number of model iterations. 0 means just run the initialisation')
@click.option('-s', '--scenario', default="default", help="Name this scenario; output results will be put into a "
"directory with this name.")
@click.option('--data-dir', default="devon_data", help='Root directory to load data from')
@click.option('--output/--no-output', default=True,
help='Whether to generate output data (default yes).')
@click.option('--output-every-iteration/--no-output-every-iteration', default=False,
help='Whether to generate output data at every iteration rather than just at the end (default no).')
@click.option('--debug/--no-debug', default=False, help="Whether to run some more expensive checks (default no debug)")
@click.option('-r', '--repetitions', default=1, help="How many times to run the model (default 1)")
@click.option('-l', '--lockdown-file', default="google_mobility_lockdown_daily.csv",
help="Optionally read lockdown mobility data from a file (default use google mobility). To have no "
"lockdown pass an empty string, i.e. --lockdown-file='' ")
@click.option('-c', '--use-cache/--no-use-cache', default=True,
help="Whether to cache the population data initialisation")
@click.option('-ocl', '--opencl/--no-opencl', default=False, help="Run OpenCL model (runs in headless mode by default)")
@click.option('-gui', '--opencl-gui/--no-opencl-gui', default=False,
help="Run the OpenCL model with GUI visualisation for OpenCL model")
@click.option('-gpu', '--opencl-gpu/--no-opencl-gpu', default=False,
help="Run OpenCL model on the GPU (if false then run using CPU")
def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,
debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):
"""
Main function which runs the population initialisation, then chooses which model to run, either the Python/R
model or the OpenCL model
"""
# If we are running with opencl_gui then set opencl to True, so you only need to pass one flag
if opencl_gui:
opencl = True
# First see if we're reading a parameters file or using command-line arguments.
if no_parameters_file:
print("Not reading a parameters file")
else:
print(f"Reading parameters file: {parameters_file}. "
f"Any other model-related command-line arguments are being ignored")
with open(parameters_file, 'r') as f:
parameters = load(f, Loader=SafeLoader)
sim_params = parameters["microsim"] # Parameters for the dynamic microsim (python)
calibration_params = parameters["microsim_calibration"]
disease_params = parameters["disease"] # Parameters for the disease model (r)
# TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:
# self.params, self.params_changed = Model._init_kwargs(params, kwargs)
# [setattr(self, key, value) for key, value in self.params.items()]
# Utility parameters
scenario = sim_params["scenario"]
iterations = sim_params["iterations"]
data_dir = sim_params["data-dir"]
output = sim_params["output"]
output_every_iteration = sim_params["output-every-iteration"]
debug = sim_params["debug"]
repetitions = sim_params["repetitions"]
lockdown_file = sim_params["lockdown-file"]
# Check the parameters are sensible
if iterations < 1:
raise ValueError("Iterations must be > 1. If you want to just initialise the model and then exit, use"
"the --initialise flag")
if repetitions < 1:
raise ValueError("Repetitions must be greater than 0")
if (not output) and output_every_iteration:
raise ValueError("Can't choose to not output any data (output=False) but also write the data at every "
"iteration (output_every_iteration=True)")
print(f"Running model with the following parameters:\n"
f"\tParameters file: {parameters_file}\n"
f"\tScenario directory: {scenario}\n"
f"\tInitialise (and then exit?): {initialise}\n"
f"\tNumber of iterations: {iterations}\n"
f"\tData dir: {data_dir}\n"
f"\tOutputting results?: {output}\n"
f"\tOutputting results at every iteration?: {output_every_iteration}\n"
f"\tDebug mode?: {debug}\n"
f"\tNumber of repetitions: {repetitions}\n"
f"\tLockdown file: {lockdown_file}\n",
f"\tUse cache?: {use_cache}\n",
f"\tUse OpenCL version?: {opencl}\n",
f"\tUse OpenCL GUI?: {opencl_gui}\n",
f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
# To fix file path issues, use absolute/full path at all times
    # Pick either: get working directory (if user starts this script in place), or set working directory
# Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
    # cache to hold previously calculated population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
# Select which model implementation to run
if opencl:
run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
else:
        # If the -init flag is set then don't run the model. Note for the opencl model this check needs to happen
# after the snapshots have been created in run_opencl_model
|
def run_opencl_model(individuals_df, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
use_gui, use_gpu, use_cache, initialise, calibration_params, disease_params):
snapshot_cache_filepath = base_dir + "/microsim/opencl/snapshots/cache.npz"
# Choose whether to load snapshot file from cache, or create a snapshot from population data
if not use_cache or not os.path.exists(snapshot_cache_filepath):
print("\nGenerating Snapshot for OpenCL model")
snapshot_converter = SnapshotConvertor(individuals_df, activity_locations, time_activity_multiplier, data_dir)
snapshot = snapshot_converter.generate_snapshot()
snapshot.save(snapshot_cache_filepath) # store snapshot in cache so we can load later
else: # load cached snapshot
snapshot = Snapshot.load_full_snapshot(path=snapshot_cache_filepath)
# set the random seed of the model
snapshot.seed_prngs(42)
# set params
if calibration_params is not None and disease_params is not None:
snapshot.update_params(create_params(calibration_params, disease_params))
if disease_params["improve_health"]:
print("Switching to healthier population")
snapshot.switch_to_healthier_population()
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exiting")
return
run_mode = "GUI" if use_gui else "headless"
print(f"\nRunning OpenCL model in {run_mode} mode")
run_opencl(snapshot, iterations, data_dir, use_gui, use_gpu, num_seed_days=disease_params["seed_days"], quiet=False)
def run_python_model(individuals_df, activity_locations_df, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file):
print("\nRunning Python / R model")
# Create a microsim object
m = Microsim(individuals_df, activity_locations_df, time_activity_multiplier, **msim_args)
copyfile(parameters_file, os.path.join(m.SCEN_DIR, "parameters.yml"))
# Run the Python / R model
if repetitions == 1:
m.run(iterations, 0)
elif repetitions >= 1: # Run it multiple times on lots of cores
try:
with multiprocessing.Pool(processes=int(os.cpu_count())) as pool:
# Copy the model instance so we don't have to re-read the data each time
# (Use a generator so we don't need to store all the models in memory at once).
models = (Microsim._make_a_copy(m) for _ in range(repetitions))
pickle_out = open(os.path.join("Models_m.pickle"), "wb")
pickle.dump(m, pickle_out)
pickle_out.close()
# models = ( Microsim(msim_args) for _ in range(repetitions))
# Also need a list giving the number of iterations for each model (same for each model)
iters = (iterations for _ in range(repetitions))
repnr = (r for r in range(repetitions))
# Run the models by passing each model and the number of iterations
pool.starmap(_run_multicore, zip(models, iters, repnr))
finally: # Make sure they get closed (shouldn't be necessary)
pool.close()
def _run_multicore(m, iter, rep):
return m.run(iter, rep)
def create_params(calibration_params, disease_params):
current_risk_beta = disease_params["current_risk_beta"]
# NB: OpenCL model incorporates the current risk beta by pre-multiplying the hazard multipliers with it
location_hazard_multipliers = LocationHazardMultipliers(
retail=calibration_params["hazard_location_multipliers"]["Retail"] * current_risk_beta,
primary_school=calibration_params["hazard_location_multipliers"]["PrimarySchool"] * current_risk_beta,
secondary_school=calibration_params["hazard_location_multipliers"]["SecondarySchool"] * current_risk_beta,
home=calibration_params["hazard_location_multipliers"]["Home"] * current_risk_beta,
work=calibration_params["hazard_location_multipliers"]["Work"] * current_risk_beta,
)
individual_hazard_multipliers = IndividualHazardMultipliers(
presymptomatic=calibration_params["hazard_individual_multipliers"]["presymptomatic"],
asymptomatic=calibration_params["hazard_individual_multipliers"]["asymptomatic"],
symptomatic=calibration_params["hazard_individual_multipliers"]["symptomatic"]
)
obesity_multipliers = [disease_params["overweight"], disease_params["obesity_30"], disease_params["obesity_35"],
disease_params["obesity_40"]]
return Params(
location_hazard_multipliers=location_hazard_multipliers,
individual_hazard_multipliers=individual_hazard_multipliers,
obesity_multipliers=obesity_multipliers,
cvd_multiplier=disease_params["cvd"],
diabetes_multiplier=disease_params["diabetes"],
bloodpressure_multiplier=disease_params["bloodpressure"],
)
if __name__ == "__main__":
main()
print("End of program")
| if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exitting")
return
run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file) | conditional_block |
main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Core RAMP-UA model.
Created on Wed Apr 29 19:59:25 2020
@author: nick
"""
import sys
import os
os.environ['R_HOME'] = 'C:/Users/gy17m2a/AppData/Local/Programs/R/R-4.2.0' #path to your R installation
os.environ['R_USER'] = 'C:/ProgramData/Anaconda3/envs/analyse_results/Lib/site-packages/rpy2' #path depends on where you installed Python. Mine is the Anaconda distribution
sys.path.append("microsim") # This is only needed when testing. I'm so confused about the imports
sys.path.append("C:/users/gy17m2a/OneDrive - University of Leeds/Project/RAMP-UA-new/")
print(os.getcwd())
print(sys.path)
import multiprocessing
import pandas as pd
pd.set_option('display.expand_frame_repr', False) # Don't wrap lines when displaying DataFrames
# pd.set_option('display.width', 0) # Automatically find the best width
import os
import click # command-line interface
import pickle # to save data
from yaml import load, SafeLoader # pyyaml library for reading the parameters.yml file
from shutil import copyfile
from microsim.quant_api import QuantRampAPI
from microsim.population_initialisation import PopulationInitialisation
from microsim.microsim_model import Microsim
from microsim.opencl.ramp.run import run_opencl
from microsim.opencl.ramp.snapshot_convertor import SnapshotConvertor
from microsim.opencl.ramp.snapshot import Snapshot
from microsim.opencl.ramp.params import Params, IndividualHazardMultipliers, LocationHazardMultipliers
from microsim.initialisation_cache import InitialisationCache
from microsim.utilities import data_setup, unpack_data
# ********
# PROGRAM ENTRY POINT
# Uses 'click' library so that it can be run from the command line
# ********
@click.command()
@click.option('-p', '--parameters_file', default="./model_parameters/default.yml", type=click.Path(exists=True),
help="Parameters file to use to configure the model. Default: ./model_parameters/default.yml")
@click.option('-npf', '--no-parameters-file', is_flag=True,
help="Don't read a parameters file, use command line arguments instead")
@click.option('-init', '--initialise', is_flag=True,
help="Just initialise the model and create caches and snapshots. Dont' run it.")
@click.option('-i', '--iterations', default=10, help='Number of model iterations. 0 means just run the initialisation')
@click.option('-s', '--scenario', default="default", help="Name this scenario; output results will be put into a "
"directory with this name.")
@click.option('--data-dir', default="devon_data", help='Root directory to load data from')
@click.option('--output/--no-output', default=True,
help='Whether to generate output data (default yes).')
@click.option('--output-every-iteration/--no-output-every-iteration', default=False,
help='Whether to generate output data at every iteration rather than just at the end (default no).')
@click.option('--debug/--no-debug', default=False, help="Whether to run some more expensive checks (default no debug)")
@click.option('-r', '--repetitions', default=1, help="How many times to run the model (default 1)")
@click.option('-l', '--lockdown-file', default="google_mobility_lockdown_daily.csv",
help="Optionally read lockdown mobility data from a file (default use google mobility). To have no "
"lockdown pass an empty string, i.e. --lockdown-file='' ")
@click.option('-c', '--use-cache/--no-use-cache', default=True,
help="Whether to cache the population data initialisation")
@click.option('-ocl', '--opencl/--no-opencl', default=False, help="Run OpenCL model (runs in headless mode by default")
@click.option('-gui', '--opencl-gui/--no-opencl-gui', default=False,
help="Run the OpenCL model with GUI visualisation for OpenCL model")
@click.option('-gpu', '--opencl-gpu/--no-opencl-gpu', default=False,
help="Run OpenCL model on the GPU (if false then run using CPU")
def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,
debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):
"""
Main function which runs the population initialisation, then chooses which model to run, either the Python/R
model or the OpenCL model
"""
# If we are running with opencl_gui then set opencl to True, so you only need to pass one flag
if opencl_gui:
opencl = True
# First see if we're reading a parameters file or using command-line arguments.
if no_parameters_file:
print("Not reading a parameters file")
else:
print(f"Reading parameters file: {parameters_file}. "
f"Any other model-related command-line arguments are being ignored")
with open(parameters_file, 'r') as f:
parameters = load(f, Loader=SafeLoader)
sim_params = parameters["microsim"] # Parameters for the dynamic microsim (python)
calibration_params = parameters["microsim_calibration"]
disease_params = parameters["disease"] # Parameters for the disease model (r)
# TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:
# self.params, self.params_changed = Model._init_kwargs(params, kwargs)
# [setattr(self, key, value) for key, value in self.params.items()]
# Utility parameters
scenario = sim_params["scenario"]
iterations = sim_params["iterations"]
data_dir = sim_params["data-dir"]
output = sim_params["output"]
output_every_iteration = sim_params["output-every-iteration"]
debug = sim_params["debug"]
repetitions = sim_params["repetitions"]
lockdown_file = sim_params["lockdown-file"]
# Check the parameters are sensible
if iterations < 1:
raise ValueError("Iterations must be > 1. If you want to just initialise the model and then exit, use"
"the --initialise flag")
if repetitions < 1:
raise ValueError("Repetitions must be greater than 0")
if (not output) and output_every_iteration:
raise ValueError("Can't choose to not output any data (output=False) but also write the data at every "
"iteration (output_every_iteration=True)")
print(f"Running model with the following parameters:\n"
f"\tParameters file: {parameters_file}\n"
f"\tScenario directory: {scenario}\n"
f"\tInitialise (and then exit?): {initialise}\n"
f"\tNumber of iterations: {iterations}\n"
f"\tData dir: {data_dir}\n"
f"\tOutputting results?: {output}\n"
f"\tOutputting results at every iteration?: {output_every_iteration}\n"
f"\tDebug mode?: {debug}\n"
f"\tNumber of repetitions: {repetitions}\n"
f"\tLockdown file: {lockdown_file}\n",
f"\tUse cache?: {use_cache}\n",
f"\tUse OpenCL version?: {opencl}\n",
f"\tUse OpenCL GUI?: {opencl_gui}\n",
f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
# To fix file path issues, use absolute/full path at all times
# Pick either: get the working directory (if the user starts this script in place) or set the working directory
# Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
# cache to hold previously calculated population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
# Select which model implementation to run
if opencl:
run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
else:
# If the -init flag is set then don't run the model. Note that for the OpenCL model this check needs to happen
# after the snapshots have been created in run_opencl_model
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exitting")
return
run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file)
def run_opencl_model(individuals_df, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
use_gui, use_gpu, use_cache, initialise, calibration_params, disease_params):
snapshot_cache_filepath = base_dir + "/microsim/opencl/snapshots/cache.npz"
# Choose whether to load snapshot file from cache, or create a snapshot from population data
if not use_cache or not os.path.exists(snapshot_cache_filepath):
print("\nGenerating Snapshot for OpenCL model")
snapshot_converter = SnapshotConvertor(individuals_df, activity_locations, time_activity_multiplier, data_dir)
snapshot = snapshot_converter.generate_snapshot()
snapshot.save(snapshot_cache_filepath) # store snapshot in cache so we can load later
else: # load cached snapshot
snapshot = Snapshot.load_full_snapshot(path=snapshot_cache_filepath)
# set the random seed of the model
snapshot.seed_prngs(42)
# set params
if calibration_params is not None and disease_params is not None:
snapshot.update_params(create_params(calibration_params, disease_params))
if disease_params["improve_health"]:
print("Switching to healthier population")
snapshot.switch_to_healthier_population()
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exiting")
return
run_mode = "GUI" if use_gui else "headless"
print(f"\nRunning OpenCL model in {run_mode} mode")
run_opencl(snapshot, iterations, data_dir, use_gui, use_gpu, num_seed_days=disease_params["seed_days"], quiet=False)
def run_python_model(individuals_df, activity_locations_df, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file):
print("\nRunning Python / R model")
# Create a microsim object
m = Microsim(individuals_df, activity_locations_df, time_activity_multiplier, **msim_args)
copyfile(parameters_file, os.path.join(m.SCEN_DIR, "parameters.yml"))
# Run the Python / R model
if repetitions == 1:
m.run(iterations, 0)
elif repetitions >= 1: # Run it multiple times on lots of cores
try:
with multiprocessing.Pool(processes=int(os.cpu_count())) as pool:
# Copy the model instance so we don't have to re-read the data each time
# (Use a generator so we don't need to store all the models in memory at once).
models = (Microsim._make_a_copy(m) for _ in range(repetitions))
pickle_out = open(os.path.join("Models_m.pickle"), "wb")
pickle.dump(m, pickle_out)
pickle_out.close()
# models = ( Microsim(msim_args) for _ in range(repetitions))
# Also need a list giving the number of iterations for each model (same for each model)
iters = (iterations for _ in range(repetitions))
repnr = (r for r in range(repetitions))
# Run the models by passing each model and the number of iterations
pool.starmap(_run_multicore, zip(models, iters, repnr))
finally: # Make sure they get closed (shouldn't be necessary)
pool.close()
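# Helper for multiprocessing.Pool.starmap: runs a single copy of the model for the given number of iterations and repetition index.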
def _run_multicore(m, iter, rep):
return m.run(iter, rep)
def | (calibration_params, disease_params):
current_risk_beta = disease_params["current_risk_beta"]
# NB: OpenCL model incorporates the current risk beta by pre-multiplying the hazard multipliers with it
location_hazard_multipliers = LocationHazardMultipliers(
retail=calibration_params["hazard_location_multipliers"]["Retail"] * current_risk_beta,
primary_school=calibration_params["hazard_location_multipliers"]["PrimarySchool"] * current_risk_beta,
secondary_school=calibration_params["hazard_location_multipliers"]["SecondarySchool"] * current_risk_beta,
home=calibration_params["hazard_location_multipliers"]["Home"] * current_risk_beta,
work=calibration_params["hazard_location_multipliers"]["Work"] * current_risk_beta,
)
individual_hazard_multipliers = IndividualHazardMultipliers(
presymptomatic=calibration_params["hazard_individual_multipliers"]["presymptomatic"],
asymptomatic=calibration_params["hazard_individual_multipliers"]["asymptomatic"],
symptomatic=calibration_params["hazard_individual_multipliers"]["symptomatic"]
)
obesity_multipliers = [disease_params["overweight"], disease_params["obesity_30"], disease_params["obesity_35"],
disease_params["obesity_40"]]
return Params(
location_hazard_multipliers=location_hazard_multipliers,
individual_hazard_multipliers=individual_hazard_multipliers,
obesity_multipliers=obesity_multipliers,
cvd_multiplier=disease_params["cvd"],
diabetes_multiplier=disease_params["diabetes"],
bloodpressure_multiplier=disease_params["bloodpressure"],
)
if __name__ == "__main__":
main()
print("End of program")
| create_params | identifier_name |
main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Core RAMP-UA model.
Created on Wed Apr 29 19:59:25 2020
@author: nick
"""
import sys
import os
os.environ['R_HOME'] = 'C:/Users/gy17m2a/AppData/Local/Programs/R/R-4.2.0' #path to your R installation
os.environ['R_USER'] = 'C:/ProgramData/Anaconda3/envs/analyse_results/Lib/site-packages/rpy2' #path depends on where you installed Python. Mine is the Anaconda distribution
sys.path.append("microsim") # This is only needed when testing. I'm so confused about the imports
sys.path.append("C:/users/gy17m2a/OneDrive - University of Leeds/Project/RAMP-UA-new/")
print(os.getcwd())
print(sys.path)
import multiprocessing
import pandas as pd
pd.set_option('display.expand_frame_repr', False) # Don't wrap lines when displaying DataFrames
# pd.set_option('display.width', 0) # Automatically find the best width
import os
import click # command-line interface
import pickle # to save data
from yaml import load, SafeLoader # pyyaml library for reading the parameters.yml file
from shutil import copyfile
from microsim.quant_api import QuantRampAPI
from microsim.population_initialisation import PopulationInitialisation
from microsim.microsim_model import Microsim
from microsim.opencl.ramp.run import run_opencl
from microsim.opencl.ramp.snapshot_convertor import SnapshotConvertor
from microsim.opencl.ramp.snapshot import Snapshot
from microsim.opencl.ramp.params import Params, IndividualHazardMultipliers, LocationHazardMultipliers
from microsim.initialisation_cache import InitialisationCache
from microsim.utilities import data_setup, unpack_data
# ********
# PROGRAM ENTRY POINT
# Uses 'click' library so that it can be run from the command line
# ********
@click.command()
@click.option('-p', '--parameters_file', default="./model_parameters/default.yml", type=click.Path(exists=True),
help="Parameters file to use to configure the model. Default: ./model_parameters/default.yml")
@click.option('-npf', '--no-parameters-file', is_flag=True,
help="Don't read a parameters file, use command line arguments instead")
@click.option('-init', '--initialise', is_flag=True,
help="Just initialise the model and create caches and snapshots. Dont' run it.")
@click.option('-i', '--iterations', default=10, help='Number of model iterations. 0 means just run the initialisation')
@click.option('-s', '--scenario', default="default", help="Name this scenario; output results will be put into a "
"directory with this name.")
@click.option('--data-dir', default="devon_data", help='Root directory to load data from')
@click.option('--output/--no-output', default=True,
help='Whether to generate output data (default yes).')
@click.option('--output-every-iteration/--no-output-every-iteration', default=False,
help='Whether to generate output data at every iteration rather than just at the end (default no).')
@click.option('--debug/--no-debug', default=False, help="Whether to run some more expensive checks (default no debug)")
@click.option('-r', '--repetitions', default=1, help="How many times to run the model (default 1)")
@click.option('-l', '--lockdown-file', default="google_mobility_lockdown_daily.csv",
help="Optionally read lockdown mobility data from a file (default use google mobility). To have no "
"lockdown pass an empty string, i.e. --lockdown-file='' ")
@click.option('-c', '--use-cache/--no-use-cache', default=True,
help="Whether to cache the population data initialisation")
@click.option('-ocl', '--opencl/--no-opencl', default=False, help="Run OpenCL model (runs in headless mode by default")
@click.option('-gui', '--opencl-gui/--no-opencl-gui', default=False,
help="Run the OpenCL model with GUI visualisation for OpenCL model")
@click.option('-gpu', '--opencl-gpu/--no-opencl-gpu', default=False,
help="Run OpenCL model on the GPU (if false then run using CPU")
def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,
debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):
"""
Main function which runs the population initialisation, then chooses which model to run, either the Python/R
model or the OpenCL model
"""
# If we are running with opencl_gui then set opencl to True, so you only need to pass one flag
if opencl_gui:
opencl = True
# First see if we're reading a parameters file or using command-line arguments.
if no_parameters_file:
print("Not reading a parameters file")
else:
print(f"Reading parameters file: {parameters_file}. "
f"Any other model-related command-line arguments are being ignored")
with open(parameters_file, 'r') as f:
parameters = load(f, Loader=SafeLoader)
sim_params = parameters["microsim"] # Parameters for the dynamic microsim (python)
calibration_params = parameters["microsim_calibration"]
disease_params = parameters["disease"] # Parameters for the disease model (r)
# TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:
# self.params, self.params_changed = Model._init_kwargs(params, kwargs)
# [setattr(self, key, value) for key, value in self.params.items()]
# Utility parameters
scenario = sim_params["scenario"]
iterations = sim_params["iterations"]
data_dir = sim_params["data-dir"]
output = sim_params["output"]
output_every_iteration = sim_params["output-every-iteration"]
debug = sim_params["debug"]
repetitions = sim_params["repetitions"]
lockdown_file = sim_params["lockdown-file"]
# Check the parameters are sensible
if iterations < 1:
raise ValueError("Iterations must be > 1. If you want to just initialise the model and then exit, use"
"the --initialise flag")
if repetitions < 1:
raise ValueError("Repetitions must be greater than 0")
if (not output) and output_every_iteration:
raise ValueError("Can't choose to not output any data (output=False) but also write the data at every "
"iteration (output_every_iteration=True)")
print(f"Running model with the following parameters:\n"
f"\tParameters file: {parameters_file}\n"
f"\tScenario directory: {scenario}\n"
f"\tInitialise (and then exit?): {initialise}\n"
f"\tNumber of iterations: {iterations}\n"
f"\tData dir: {data_dir}\n"
f"\tOutputting results?: {output}\n"
f"\tOutputting results at every iteration?: {output_every_iteration}\n"
f"\tDebug mode?: {debug}\n"
f"\tNumber of repetitions: {repetitions}\n"
f"\tLockdown file: {lockdown_file}\n",
f"\tUse cache?: {use_cache}\n",
f"\tUse OpenCL version?: {opencl}\n",
f"\tUse OpenCL GUI?: {opencl_gui}\n",
f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
# To fix file path issues, use absolute/full path at all times
# Pick either: get the working directory (if the user starts this script in place) or set the working directory
# Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
# cache to hold previously calculated population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
# Select which model implementation to run
if opencl:
run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
else:
# If the -init flag is set then don't run the model. Note that for the OpenCL model this check needs to happen
# after the snapshots have been created in run_opencl_model
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exitting")
return
run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file)
def run_opencl_model(individuals_df, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
use_gui, use_gpu, use_cache, initialise, calibration_params, disease_params):
snapshot_cache_filepath = base_dir + "/microsim/opencl/snapshots/cache.npz"
# Choose whether to load snapshot file from cache, or create a snapshot from population data
if not use_cache or not os.path.exists(snapshot_cache_filepath):
print("\nGenerating Snapshot for OpenCL model")
snapshot_converter = SnapshotConvertor(individuals_df, activity_locations, time_activity_multiplier, data_dir)
snapshot = snapshot_converter.generate_snapshot()
snapshot.save(snapshot_cache_filepath) # store snapshot in cache so we can load later
else: # load cached snapshot
snapshot = Snapshot.load_full_snapshot(path=snapshot_cache_filepath)
# set the random seed of the model
snapshot.seed_prngs(42)
# set params
if calibration_params is not None and disease_params is not None:
snapshot.update_params(create_params(calibration_params, disease_params))
if disease_params["improve_health"]:
print("Switching to healthier population")
snapshot.switch_to_healthier_population()
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exiting")
return
run_mode = "GUI" if use_gui else "headless"
print(f"\nRunning OpenCL model in {run_mode} mode")
run_opencl(snapshot, iterations, data_dir, use_gui, use_gpu, num_seed_days=disease_params["seed_days"], quiet=False)
def run_python_model(individuals_df, activity_locations_df, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file):
print("\nRunning Python / R model")
# Create a microsim object
m = Microsim(individuals_df, activity_locations_df, time_activity_multiplier, **msim_args)
copyfile(parameters_file, os.path.join(m.SCEN_DIR, "parameters.yml"))
# Run the Python / R model
if repetitions == 1:
m.run(iterations, 0)
elif repetitions >= 1: # Run it multiple times on lots of cores
try:
with multiprocessing.Pool(processes=int(os.cpu_count())) as pool:
# Copy the model instance so we don't have to re-read the data each time
# (Use a generator so we don't need to store all the models in memory at once).
models = (Microsim._make_a_copy(m) for _ in range(repetitions))
pickle_out = open(os.path.join("Models_m.pickle"), "wb")
pickle.dump(m, pickle_out)
pickle_out.close()
# models = ( Microsim(msim_args) for _ in range(repetitions))
# Also need a list giving the number of iterations for each model (same for each model)
iters = (iterations for _ in range(repetitions))
repnr = (r for r in range(repetitions))
# Run the models by passing each model and the number of iterations
pool.starmap(_run_multicore, zip(models, iters, repnr))
finally: # Make sure they get closed (shouldn't be necessary)
pool.close()
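# Helper for multiprocessing.Pool.starmap: runs a single copy of the model for the given number of iterations and repetition index.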
def _run_multicore(m, iter, rep):
return m.run(iter, rep)
def create_params(calibration_params, disease_params):
|
if __name__ == "__main__":
main()
print("End of program")
| current_risk_beta = disease_params["current_risk_beta"]
# NB: OpenCL model incorporates the current risk beta by pre-multiplying the hazard multipliers with it
location_hazard_multipliers = LocationHazardMultipliers(
retail=calibration_params["hazard_location_multipliers"]["Retail"] * current_risk_beta,
primary_school=calibration_params["hazard_location_multipliers"]["PrimarySchool"] * current_risk_beta,
secondary_school=calibration_params["hazard_location_multipliers"]["SecondarySchool"] * current_risk_beta,
home=calibration_params["hazard_location_multipliers"]["Home"] * current_risk_beta,
work=calibration_params["hazard_location_multipliers"]["Work"] * current_risk_beta,
)
individual_hazard_multipliers = IndividualHazardMultipliers(
presymptomatic=calibration_params["hazard_individual_multipliers"]["presymptomatic"],
asymptomatic=calibration_params["hazard_individual_multipliers"]["asymptomatic"],
symptomatic=calibration_params["hazard_individual_multipliers"]["symptomatic"]
)
obesity_multipliers = [disease_params["overweight"], disease_params["obesity_30"], disease_params["obesity_35"],
disease_params["obesity_40"]]
return Params(
location_hazard_multipliers=location_hazard_multipliers,
individual_hazard_multipliers=individual_hazard_multipliers,
obesity_multipliers=obesity_multipliers,
cvd_multiplier=disease_params["cvd"],
diabetes_multiplier=disease_params["diabetes"],
bloodpressure_multiplier=disease_params["bloodpressure"],
) | identifier_body |
main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Core RAMP-UA model.
Created on Wed Apr 29 19:59:25 2020
@author: nick
"""
import sys
import os
os.environ['R_HOME'] = 'C:/Users/gy17m2a/AppData/Local/Programs/R/R-4.2.0' #path to your R installation
os.environ['R_USER'] = 'C:/ProgramData/Anaconda3/envs/analyse_results/Lib/site-packages/rpy2' #path depends on where you installed Python. Mine is the Anaconda distribution
sys.path.append("microsim") # This is only needed when testing. I'm so confused about the imports
sys.path.append("C:/users/gy17m2a/OneDrive - University of Leeds/Project/RAMP-UA-new/")
print(os.getcwd())
print(sys.path)
import multiprocessing
import pandas as pd
pd.set_option('display.expand_frame_repr', False) # Don't wrap lines when displaying DataFrames
# pd.set_option('display.width', 0) # Automatically find the best width
import os
import click # command-line interface
import pickle # to save data
from yaml import load, SafeLoader # pyyaml library for reading the parameters.yml file
from shutil import copyfile
from microsim.quant_api import QuantRampAPI
from microsim.population_initialisation import PopulationInitialisation
from microsim.microsim_model import Microsim
from microsim.opencl.ramp.run import run_opencl
from microsim.opencl.ramp.snapshot_convertor import SnapshotConvertor
from microsim.opencl.ramp.snapshot import Snapshot
from microsim.opencl.ramp.params import Params, IndividualHazardMultipliers, LocationHazardMultipliers
from microsim.initialisation_cache import InitialisationCache
from microsim.utilities import data_setup, unpack_data
# ********
# PROGRAM ENTRY POINT
# Uses 'click' library so that it can be run from the command line
# ********
@click.command()
@click.option('-p', '--parameters_file', default="./model_parameters/default.yml", type=click.Path(exists=True),
help="Parameters file to use to configure the model. Default: ./model_parameters/default.yml")
@click.option('-npf', '--no-parameters-file', is_flag=True,
help="Don't read a parameters file, use command line arguments instead")
@click.option('-init', '--initialise', is_flag=True,
help="Just initialise the model and create caches and snapshots. Dont' run it.")
@click.option('-i', '--iterations', default=10, help='Number of model iterations. 0 means just run the initialisation')
@click.option('-s', '--scenario', default="default", help="Name this scenario; output results will be put into a "
"directory with this name.")
@click.option('--data-dir', default="devon_data", help='Root directory to load data from')
@click.option('--output/--no-output', default=True,
help='Whether to generate output data (default yes).')
@click.option('--output-every-iteration/--no-output-every-iteration', default=False,
help='Whether to generate output data at every iteration rather than just at the end (default no).')
@click.option('--debug/--no-debug', default=False, help="Whether to run some more expensive checks (default no debug)")
@click.option('-r', '--repetitions', default=1, help="How many times to run the model (default 1)")
@click.option('-l', '--lockdown-file', default="google_mobility_lockdown_daily.csv",
help="Optionally read lockdown mobility data from a file (default use google mobility). To have no "
"lockdown pass an empty string, i.e. --lockdown-file='' ")
@click.option('-c', '--use-cache/--no-use-cache', default=True,
help="Whether to cache the population data initialisation")
@click.option('-ocl', '--opencl/--no-opencl', default=False, help="Run OpenCL model (runs in headless mode by default")
@click.option('-gui', '--opencl-gui/--no-opencl-gui', default=False,
help="Run the OpenCL model with GUI visualisation for OpenCL model")
@click.option('-gpu', '--opencl-gpu/--no-opencl-gpu', default=False,
help="Run OpenCL model on the GPU (if false then run using CPU")
def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,
debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):
"""
Main function which runs the population initialisation, then chooses which model to run, either the Python/R
model or the OpenCL model
"""
# If we are running with opencl_gui then set opencl to True, so you only need to pass one flag
if opencl_gui:
opencl = True
# First see if we're reading a parameters file or using command-line arguments.
if no_parameters_file:
print("Not reading a parameters file")
else:
print(f"Reading parameters file: {parameters_file}. "
f"Any other model-related command-line arguments are being ignored")
with open(parameters_file, 'r') as f:
parameters = load(f, Loader=SafeLoader)
sim_params = parameters["microsim"] # Parameters for the dynamic microsim (python)
calibration_params = parameters["microsim_calibration"]
disease_params = parameters["disease"] # Parameters for the disease model (r)
# TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:
# self.params, self.params_changed = Model._init_kwargs(params, kwargs)
# [setattr(self, key, value) for key, value in self.params.items()]
# Utility parameters
scenario = sim_params["scenario"]
iterations = sim_params["iterations"]
data_dir = sim_params["data-dir"]
output = sim_params["output"]
output_every_iteration = sim_params["output-every-iteration"]
debug = sim_params["debug"]
repetitions = sim_params["repetitions"]
lockdown_file = sim_params["lockdown-file"]
# Check the parameters are sensible
if iterations < 1:
raise ValueError("Iterations must be > 1. If you want to just initialise the model and then exit, use"
"the --initialise flag")
if repetitions < 1:
raise ValueError("Repetitions must be greater than 0")
if (not output) and output_every_iteration:
raise ValueError("Can't choose to not output any data (output=False) but also write the data at every "
"iteration (output_every_iteration=True)")
print(f"Running model with the following parameters:\n"
f"\tParameters file: {parameters_file}\n"
f"\tScenario directory: {scenario}\n"
f"\tInitialise (and then exit?): {initialise}\n"
f"\tNumber of iterations: {iterations}\n"
f"\tData dir: {data_dir}\n"
f"\tOutputting results?: {output}\n"
f"\tOutputting results at every iteration?: {output_every_iteration}\n"
f"\tDebug mode?: {debug}\n"
f"\tNumber of repetitions: {repetitions}\n"
f"\tLockdown file: {lockdown_file}\n",
f"\tUse cache?: {use_cache}\n",
f"\tUse OpenCL version?: {opencl}\n",
f"\tUse OpenCL GUI?: {opencl_gui}\n",
f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
| # Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
# cache to hold previously calculated population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
# Select which model implementation to run
if opencl:
run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
else:
# If the -init flag is set then don't run the model. Note that for the OpenCL model this check needs to happen
# after the snapshots have been created in run_opencl_model
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exitting")
return
run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file)
def run_opencl_model(individuals_df, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
use_gui, use_gpu, use_cache, initialise, calibration_params, disease_params):
snapshot_cache_filepath = base_dir + "/microsim/opencl/snapshots/cache.npz"
# Choose whether to load snapshot file from cache, or create a snapshot from population data
if not use_cache or not os.path.exists(snapshot_cache_filepath):
print("\nGenerating Snapshot for OpenCL model")
snapshot_converter = SnapshotConvertor(individuals_df, activity_locations, time_activity_multiplier, data_dir)
snapshot = snapshot_converter.generate_snapshot()
snapshot.save(snapshot_cache_filepath) # store snapshot in cache so we can load later
else: # load cached snapshot
snapshot = Snapshot.load_full_snapshot(path=snapshot_cache_filepath)
# set the random seed of the model
snapshot.seed_prngs(42)
# set params
if calibration_params is not None and disease_params is not None:
snapshot.update_params(create_params(calibration_params, disease_params))
if disease_params["improve_health"]:
print("Switching to healthier population")
snapshot.switch_to_healthier_population()
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exiting")
return
run_mode = "GUI" if use_gui else "headless"
print(f"\nRunning OpenCL model in {run_mode} mode")
run_opencl(snapshot, iterations, data_dir, use_gui, use_gpu, num_seed_days=disease_params["seed_days"], quiet=False)
def run_python_model(individuals_df, activity_locations_df, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file):
print("\nRunning Python / R model")
# Create a microsim object
m = Microsim(individuals_df, activity_locations_df, time_activity_multiplier, **msim_args)
copyfile(parameters_file, os.path.join(m.SCEN_DIR, "parameters.yml"))
# Run the Python / R model
if repetitions == 1:
m.run(iterations, 0)
elif repetitions >= 1: # Run it multiple times on lots of cores
try:
with multiprocessing.Pool(processes=int(os.cpu_count())) as pool:
# Copy the model instance so we don't have to re-read the data each time
# (Use a generator so we don't need to store all the models in memory at once).
models = (Microsim._make_a_copy(m) for _ in range(repetitions))
pickle_out = open(os.path.join("Models_m.pickle"), "wb")
pickle.dump(m, pickle_out)
pickle_out.close()
# models = ( Microsim(msim_args) for _ in range(repetitions))
# Also need a list giving the number of iterations for each model (same for each model)
iters = (iterations for _ in range(repetitions))
repnr = (r for r in range(repetitions))
# Run the models by passing each model and the number of iterations
pool.starmap(_run_multicore, zip(models, iters, repnr))
finally: # Make sure they get closed (shouldn't be necessary)
pool.close()
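# Helper for multiprocessing.Pool.starmap: runs a single copy of the model for the given number of iterations and repetition index.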
def _run_multicore(m, iter, rep):
return m.run(iter, rep)
def create_params(calibration_params, disease_params):
current_risk_beta = disease_params["current_risk_beta"]
# NB: OpenCL model incorporates the current risk beta by pre-multiplying the hazard multipliers with it
location_hazard_multipliers = LocationHazardMultipliers(
retail=calibration_params["hazard_location_multipliers"]["Retail"] * current_risk_beta,
primary_school=calibration_params["hazard_location_multipliers"]["PrimarySchool"] * current_risk_beta,
secondary_school=calibration_params["hazard_location_multipliers"]["SecondarySchool"] * current_risk_beta,
home=calibration_params["hazard_location_multipliers"]["Home"] * current_risk_beta,
work=calibration_params["hazard_location_multipliers"]["Work"] * current_risk_beta,
)
individual_hazard_multipliers = IndividualHazardMultipliers(
presymptomatic=calibration_params["hazard_individual_multipliers"]["presymptomatic"],
asymptomatic=calibration_params["hazard_individual_multipliers"]["asymptomatic"],
symptomatic=calibration_params["hazard_individual_multipliers"]["symptomatic"]
)
obesity_multipliers = [disease_params["overweight"], disease_params["obesity_30"], disease_params["obesity_35"],
disease_params["obesity_40"]]
return Params(
location_hazard_multipliers=location_hazard_multipliers,
individual_hazard_multipliers=individual_hazard_multipliers,
obesity_multipliers=obesity_multipliers,
cvd_multiplier=disease_params["cvd"],
diabetes_multiplier=disease_params["diabetes"],
bloodpressure_multiplier=disease_params["bloodpressure"],
)
if __name__ == "__main__":
main()
print("End of program") | # To fix file path issues, use absolute/full path at all times
# Pick either: get working directory (if user starts this script in place, or set working directory | random_line_split |
main.rs | #![feature(test)]
#[macro_use] extern crate gfx;
extern crate gfx_window_glutin;
extern crate gfx_device_gl;
extern crate glutin;
extern crate rand;
extern crate failure;
#[macro_use] extern crate failure_derive;
extern crate image;
extern crate rusttype;
extern crate specs;
extern crate rayon;
#[macro_use] extern crate specs_derive;
extern crate num_integer;
#[macro_use] extern crate lazy_static;
extern crate serde;
extern crate serde_yaml;
extern crate cgmath;
#[macro_use] extern crate serde_derive;
#[cfg(test)]
extern crate test;
mod renderer;
mod comp;
mod input;
mod sys_control;
mod sys_health;
mod sys_phys;
mod sys_anim;
mod sys_lifetime;
mod sys_on_hit;
mod sys_pickup;
mod sys_death_drop;
mod sys_track_pos;
mod sys_match_anim;
mod sys_set_equipment;
mod vec;
mod ui;
mod camera;
mod math_util;
mod item;
mod inventory;
mod drop_tables;
mod asset_loader;
use comp::*;
use vec::*;
use specs::*;
use gfx::Device;
use gfx_window_glutin as gfx_glutin;
use glutin::{GlRequest, GlContext};
use glutin::Api::OpenGl;
use std::time;
use std::thread;
use rand::SeedableRng;
use renderer::get_asset_by_name;
pub struct CollisionMeta {
/// This normal points outwards from entity B to entity A (and is also used
/// to resolve circ - circ collisions)
/// Will be normalised.
#[allow(dead_code)]
normal: Vec32,
}
/// Lists pairs of collisions.
pub struct Collisions(Vec<(Entity, Entity, CollisionMeta)>);
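/// Time elapsed since the last frame, in seconds.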
pub struct DeltaTime(pub f32);
/// Vertex buffer for game objects
pub struct GameVertexBuffer(renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the tiem, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct UIVertexBuffer(renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means alive characters have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) {}
}
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Alliance::good())
.with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
.with(MarkerSys, "update",
&["phys", "anim_sprite", "health", "follow_camera",
"oh_knockback", "track_pos", "match_anim"])
// After-death effects
.with(sys_death_drop::OnDeathDropSys::new(
rand::rngs::StdRng::from_rng(
rand::thread_rng()).unwrap()),
"on_death_drop", &["update"])
// Paint
.with(renderer::TilemapPainter::new(), "tilemap_paint", &["update"])
.with(renderer::SpritePainter, "sprite_paint", &["update"])
.with(renderer::InventoryPainter, "ui_inventory_paint", &["update"])
.build();
dispatcher.setup(&mut world.res);
// Number of frames until we print another frame time
let mut fps_count_timer = 60;
loop {
let start = time::Instant::now();
// update input
{
let mut input_state = world.write_resource::<input::InputState>();
input_state.process_input(&input_map, &mut events_loop);
if input_state.should_close { break; } // Early return for speedy exit
// Update window size if needed
if input_state.window_dimensions_need_update |
}
// Update & paint the world
{
dispatcher.dispatch_seq(&mut world.res);
// Get the player position
let player_pos = world.read_storage::<Pos>().get(player).unwrap().clone();
let player_pos = [player_pos.pos.x, player_pos.z, player_pos.pos.y];
let mut ui_v_buf = world.write_resource::<UIVertexBuffer>();
let mut game_v_buf = world.write_resource::<GameVertexBuffer>();
let mut terrain_v_buf = world.write_resource::<TerrainVertexBuffer>();
let mut terrain_v_buf_needs_update =
world.write_resource::<TerrainVertexBufferNeedsUpdate>();
let camera = &world.read_resource::<camera::Camera>();
// Update buffers
renderer.update_buffer(&ui_v_buf.0, renderer::BufferType::UI);
renderer.update_buffer(&game_v_buf.0, renderer::BufferType::Game);
if terrain_v_buf_needs_update.0 {
renderer.update_buffer(&terrain_v_buf.0, renderer::BufferType::Terrain);
terrain_v_buf_needs_update.0 = false;
}
// Clear & render
renderer.clear();
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Terrain);
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Game);
renderer.clear_depth();
renderer.render_buffer(&camera, [0.0, 0.0, 0.0], renderer::BufferType::UI);
renderer.flush(&mut device);
window.swap_buffers().unwrap();
device.cleanup();
// Reset ECS state after rendering
// After painting, we need to clear the v_buf
ui_v_buf.0.size = 0;
game_v_buf.0.size = 0;
terrain_v_buf.0.size = 0;
// Clear collision list for next frame
let mut collisions = world.write_resource::<Collisions>();
collisions.0.clear();
let mut killed = world.write_resource::<KilledEntities>();
killed.0.clear();
}
// Actually delete all entities that need to be deleted
world.maintain();
// Calculate frame time
let elapsed = start.elapsed();
if fps_count_timer <= 0 {
println!("Time taken (millis): {:?}",
elapsed.as_secs() * 1000 + elapsed.subsec_millis() as u64);
fps_count_timer = 60;
}
fps_count_timer -= 1;
// Sleep so we don't exceed 60fps. Vsync caps the frame rate while the window is
// being rendered, but once it isn't we would just spin and burn CPU without this.
if elapsed.subsec_millis() < 17 && elapsed.as_secs() == 0 {
thread::sleep(time::Duration::from_millis(17) - elapsed);
}
}
}
| {
println!("Resizing window viewport");
renderer.update_window_size(&window);
} | conditional_block |
main.rs | #![feature(test)]
#[macro_use] extern crate gfx;
extern crate gfx_window_glutin;
extern crate gfx_device_gl;
extern crate glutin;
extern crate rand;
extern crate failure;
#[macro_use] extern crate failure_derive;
extern crate image;
extern crate rusttype;
extern crate specs;
extern crate rayon;
#[macro_use] extern crate specs_derive;
extern crate num_integer;
#[macro_use] extern crate lazy_static;
extern crate serde;
extern crate serde_yaml;
extern crate cgmath;
#[macro_use] extern crate serde_derive;
#[cfg(test)]
extern crate test;
mod renderer;
mod comp;
mod input;
mod sys_control;
mod sys_health;
mod sys_phys;
mod sys_anim;
mod sys_lifetime;
mod sys_on_hit;
mod sys_pickup;
mod sys_death_drop;
mod sys_track_pos;
mod sys_match_anim;
mod sys_set_equipment;
mod vec;
mod ui;
mod camera;
mod math_util;
mod item;
mod inventory;
mod drop_tables;
mod asset_loader;
use comp::*;
use vec::*;
use specs::*;
use gfx::Device;
use gfx_window_glutin as gfx_glutin;
use glutin::{GlRequest, GlContext};
use glutin::Api::OpenGl;
use std::time;
use std::thread;
use rand::SeedableRng;
use renderer::get_asset_by_name;
pub struct CollisionMeta {
/// This normal points outwards from entity B to entity A (and is also used
/// to resolve circ - circ collisions)
/// Will be normalised.
#[allow(dead_code)]
normal: Vec32,
}
/// Lists pairs of collisions.
pub struct Collisions(Vec<(Entity, Entity, CollisionMeta)>);
pub struct DeltaTime(pub f32);
/// Vertex buffer for game objects
pub struct GameVertexBuffer(renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the time, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct UIVertexBuffer(renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means living characters that have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) {}
}
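// Editor's sketch (not part of the original game code): because MarkerSys carries no
// data, registering it under a name turns that name into a synchronisation barrier
// that later systems can depend on, exactly like the "control", "phys" and "update"
// markers used by the dispatcher in main(). The system names below are made up.
#[test]
fn marker_sys_acts_as_barrier() {
    let mut world = specs::World::new();
    let mut dispatcher = specs::DispatcherBuilder::new()
        .with(MarkerSys, "stage_a", &[])
        .with(MarkerSys, "barrier", &["stage_a"])
        .with(MarkerSys, "stage_b", &["barrier"])
        .build();
    dispatcher.setup(&mut world.res);
    // Dispatching must respect the declared ordering without panicking.
    dispatcher.dispatch_seq(&mut world.res);
}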
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 }) | .with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
.with(MarkerSys, "update",
&["phys", "anim_sprite", "health", "follow_camera",
"oh_knockback", "track_pos", "match_anim"])
// After-death effects
.with(sys_death_drop::OnDeathDropSys::new(
rand::rngs::StdRng::from_rng(
rand::thread_rng()).unwrap()),
"on_death_drop", &["update"])
// Paint
.with(renderer::TilemapPainter::new(), "tilemap_paint", &["update"])
.with(renderer::SpritePainter, "sprite_paint", &["update"])
.with(renderer::InventoryPainter, "ui_inventory_paint", &["update"])
.build();
dispatcher.setup(&mut world.res);
// Number of frames until we print another frame time
let mut fps_count_timer = 60;
loop {
let start = time::Instant::now();
// update input
{
let mut input_state = world.write_resource::<input::InputState>();
input_state.process_input(&input_map, &mut events_loop);
if input_state.should_close { break; } // Early return for speedy exit
// Update window size if needed
if input_state.window_dimensions_need_update {
println!("Resizing window viewport");
renderer.update_window_size(&window);
}
}
// Update & paint the world
{
dispatcher.dispatch_seq(&mut world.res);
// Get the player position
let player_pos = world.read_storage::<Pos>().get(player).unwrap().clone();
let player_pos = [player_pos.pos.x, player_pos.z, player_pos.pos.y];
let mut ui_v_buf = world.write_resource::<UIVertexBuffer>();
let mut game_v_buf = world.write_resource::<GameVertexBuffer>();
let mut terrain_v_buf = world.write_resource::<TerrainVertexBuffer>();
let mut terrain_v_buf_needs_update =
world.write_resource::<TerrainVertexBufferNeedsUpdate>();
let camera = &world.read_resource::<camera::Camera>();
// Update buffers
renderer.update_buffer(&ui_v_buf.0, renderer::BufferType::UI);
renderer.update_buffer(&game_v_buf.0, renderer::BufferType::Game);
if terrain_v_buf_needs_update.0 {
renderer.update_buffer(&terrain_v_buf.0, renderer::BufferType::Terrain);
terrain_v_buf_needs_update.0 = false;
}
// Clear & render
renderer.clear();
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Terrain);
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Game);
renderer.clear_depth();
renderer.render_buffer(&camera, [0.0, 0.0, 0.0], renderer::BufferType::UI);
renderer.flush(&mut device);
window.swap_buffers().unwrap();
device.cleanup();
// Reset ECS state after rendering
// After painting, we need to clear the v_buf
ui_v_buf.0.size = 0;
game_v_buf.0.size = 0;
terrain_v_buf.0.size = 0;
// Clear collision list for next frame
let mut collisions = world.write_resource::<Collisions>();
collisions.0.clear();
let mut killed = world.write_resource::<KilledEntities>();
killed.0.clear();
}
// Actually delete all entities that need to be deleted
world.maintain();
// Calculate frame time
let elapsed = start.elapsed();
if fps_count_timer <= 0 {
println!("Time taken (millis): {:?}",
elapsed.as_secs() * 1000 + elapsed.subsec_millis() as u64);
fps_count_timer = 60;
}
fps_count_timer -= 1;
// Sleep so we don't exceed 60fps. Vsync caps the frame rate while the window is
// being rendered, but once it isn't we would just spin and burn CPU without this.
if elapsed.subsec_millis() < 17 && elapsed.as_secs() == 0 {
thread::sleep(time::Duration::from_millis(17) - elapsed);
}
}
} | .with(Vel { vel: Vec32::zero() })
.with(Alliance::good()) | random_line_split |
main.rs | #![feature(test)]
#[macro_use] extern crate gfx;
extern crate gfx_window_glutin;
extern crate gfx_device_gl;
extern crate glutin;
extern crate rand;
extern crate failure;
#[macro_use] extern crate failure_derive;
extern crate image;
extern crate rusttype;
extern crate specs;
extern crate rayon;
#[macro_use] extern crate specs_derive;
extern crate num_integer;
#[macro_use] extern crate lazy_static;
extern crate serde;
extern crate serde_yaml;
extern crate cgmath;
#[macro_use] extern crate serde_derive;
#[cfg(test)]
extern crate test;
mod renderer;
mod comp;
mod input;
mod sys_control;
mod sys_health;
mod sys_phys;
mod sys_anim;
mod sys_lifetime;
mod sys_on_hit;
mod sys_pickup;
mod sys_death_drop;
mod sys_track_pos;
mod sys_match_anim;
mod sys_set_equipment;
mod vec;
mod ui;
mod camera;
mod math_util;
mod item;
mod inventory;
mod drop_tables;
mod asset_loader;
use comp::*;
use vec::*;
use specs::*;
use gfx::Device;
use gfx_window_glutin as gfx_glutin;
use glutin::{GlRequest, GlContext};
use glutin::Api::OpenGl;
use std::time;
use std::thread;
use rand::SeedableRng;
use renderer::get_asset_by_name;
pub struct CollisionMeta {
/// This normal points outwards from entity B to entity A (and is also used
/// to resolve circ - circ collisions)
/// Will be normalised.
#[allow(dead_code)]
normal: Vec32,
}
/// Lists pairs of collisions.
pub struct Collisions(Vec<(Entity, Entity, CollisionMeta)>);
pub struct DeltaTime(pub f32);
/// Vertex buffer for game objects
pub struct GameVertexBuffer(renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the time, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct | (renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means living characters that have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) {}
}
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Alliance::good())
.with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
.with(MarkerSys, "update",
&["phys", "anim_sprite", "health", "follow_camera",
"oh_knockback", "track_pos", "match_anim"])
// After-death effects
.with(sys_death_drop::OnDeathDropSys::new(
rand::rngs::StdRng::from_rng(
rand::thread_rng()).unwrap()),
"on_death_drop", &["update"])
// Paint
.with(renderer::TilemapPainter::new(), "tilemap_paint", &["update"])
.with(renderer::SpritePainter, "sprite_paint", &["update"])
.with(renderer::InventoryPainter, "ui_inventory_paint", &["update"])
.build();
dispatcher.setup(&mut world.res);
// Number of frames until we print another frame time
let mut fps_count_timer = 60;
loop {
let start = time::Instant::now();
// update input
{
let mut input_state = world.write_resource::<input::InputState>();
input_state.process_input(&input_map, &mut events_loop);
if input_state.should_close { break; } // Early return for speedy exit
// Update window size if needed
if input_state.window_dimensions_need_update {
println!("Resizing window viewport");
renderer.update_window_size(&window);
}
}
// Update & paint the world
{
dispatcher.dispatch_seq(&mut world.res);
// Get the player position
let player_pos = world.read_storage::<Pos>().get(player).unwrap().clone();
let player_pos = [player_pos.pos.x, player_pos.z, player_pos.pos.y];
let mut ui_v_buf = world.write_resource::<UIVertexBuffer>();
let mut game_v_buf = world.write_resource::<GameVertexBuffer>();
let mut terrain_v_buf = world.write_resource::<TerrainVertexBuffer>();
let mut terrain_v_buf_needs_update =
world.write_resource::<TerrainVertexBufferNeedsUpdate>();
let camera = &world.read_resource::<camera::Camera>();
// Update buffers
renderer.update_buffer(&ui_v_buf.0, renderer::BufferType::UI);
renderer.update_buffer(&game_v_buf.0, renderer::BufferType::Game);
if terrain_v_buf_needs_update.0 {
renderer.update_buffer(&terrain_v_buf.0, renderer::BufferType::Terrain);
terrain_v_buf_needs_update.0 = false;
}
// Clear & render
renderer.clear();
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Terrain);
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Game);
renderer.clear_depth();
renderer.render_buffer(&camera, [0.0, 0.0, 0.0], renderer::BufferType::UI);
renderer.flush(&mut device);
window.swap_buffers().unwrap();
device.cleanup();
// Reset ECS state after rendering
// After painting, we need to clear the v_buf
ui_v_buf.0.size = 0;
game_v_buf.0.size = 0;
terrain_v_buf.0.size = 0;
// Clear collision list for next frame
let mut collisions = world.write_resource::<Collisions>();
collisions.0.clear();
let mut killed = world.write_resource::<KilledEntities>();
killed.0.clear();
}
// Actually delete all entities that need to be deleted
world.maintain();
// Calculate frame time
let elapsed = start.elapsed();
if fps_count_timer <= 0 {
println!("Time taken (millis): {:?}",
elapsed.as_secs() * 1000 + elapsed.subsec_millis() as u64);
fps_count_timer = 60;
}
fps_count_timer -= 1;
// Sleep so we don't exceed 60fps. Vsync caps the frame rate while the window is
// being rendered, but once it isn't we would just spin and burn CPU without this.
if elapsed.subsec_millis() < 17 && elapsed.as_secs() == 0 {
thread::sleep(time::Duration::from_millis(17) - elapsed);
}
}
}
| UIVertexBuffer | identifier_name |
main.rs | #![feature(test)]
#[macro_use] extern crate gfx;
extern crate gfx_window_glutin;
extern crate gfx_device_gl;
extern crate glutin;
extern crate rand;
extern crate failure;
#[macro_use] extern crate failure_derive;
extern crate image;
extern crate rusttype;
extern crate specs;
extern crate rayon;
#[macro_use] extern crate specs_derive;
extern crate num_integer;
#[macro_use] extern crate lazy_static;
extern crate serde;
extern crate serde_yaml;
extern crate cgmath;
#[macro_use] extern crate serde_derive;
#[cfg(test)]
extern crate test;
mod renderer;
mod comp;
mod input;
mod sys_control;
mod sys_health;
mod sys_phys;
mod sys_anim;
mod sys_lifetime;
mod sys_on_hit;
mod sys_pickup;
mod sys_death_drop;
mod sys_track_pos;
mod sys_match_anim;
mod sys_set_equipment;
mod vec;
mod ui;
mod camera;
mod math_util;
mod item;
mod inventory;
mod drop_tables;
mod asset_loader;
use comp::*;
use vec::*;
use specs::*;
use gfx::Device;
use gfx_window_glutin as gfx_glutin;
use glutin::{GlRequest, GlContext};
use glutin::Api::OpenGl;
use std::time;
use std::thread;
use rand::SeedableRng;
use renderer::get_asset_by_name;
pub struct CollisionMeta {
/// This normal points outwards from entity B to entity A (and is also used
/// to resolve circ - circ collisions)
/// Will be normalised.
#[allow(dead_code)]
normal: Vec32,
}
/// Lists pairs of collisions.
pub struct Collisions(Vec<(Entity, Entity, CollisionMeta)>);
pub struct DeltaTime(pub f32);
/// Vertex buffer for game objects
pub struct GameVertexBuffer(renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the time, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct UIVertexBuffer(renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means living characters that have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) |
}
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Alliance::good())
.with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
.with(MarkerSys, "update",
&["phys", "anim_sprite", "health", "follow_camera",
"oh_knockback", "track_pos", "match_anim"])
// After-death effects
.with(sys_death_drop::OnDeathDropSys::new(
rand::rngs::StdRng::from_rng(
rand::thread_rng()).unwrap()),
"on_death_drop", &["update"])
// Paint
.with(renderer::TilemapPainter::new(), "tilemap_paint", &["update"])
.with(renderer::SpritePainter, "sprite_paint", &["update"])
.with(renderer::InventoryPainter, "ui_inventory_paint", &["update"])
.build();
dispatcher.setup(&mut world.res);
// Number of frames until we print another frame time
let mut fps_count_timer = 60;
loop {
let start = time::Instant::now();
// update input
{
let mut input_state = world.write_resource::<input::InputState>();
input_state.process_input(&input_map, &mut events_loop);
if input_state.should_close { break; } // Early return for speedy exit
// Update window size if needed
if input_state.window_dimensions_need_update {
println!("Resizing window viewport");
renderer.update_window_size(&window);
}
}
// Update & paint the world
{
dispatcher.dispatch_seq(&mut world.res);
// Get the player position
let player_pos = world.read_storage::<Pos>().get(player).unwrap().clone();
let player_pos = [player_pos.pos.x, player_pos.z, player_pos.pos.y];
let mut ui_v_buf = world.write_resource::<UIVertexBuffer>();
let mut game_v_buf = world.write_resource::<GameVertexBuffer>();
let mut terrain_v_buf = world.write_resource::<TerrainVertexBuffer>();
let mut terrain_v_buf_needs_update =
world.write_resource::<TerrainVertexBufferNeedsUpdate>();
let camera = &world.read_resource::<camera::Camera>();
// Update buffers
renderer.update_buffer(&ui_v_buf.0, renderer::BufferType::UI);
renderer.update_buffer(&game_v_buf.0, renderer::BufferType::Game);
if terrain_v_buf_needs_update.0 {
renderer.update_buffer(&terrain_v_buf.0, renderer::BufferType::Terrain);
terrain_v_buf_needs_update.0 = false;
}
// Clear & render
renderer.clear();
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Terrain);
renderer.render_buffer(&camera, player_pos, renderer::BufferType::Game);
renderer.clear_depth();
renderer.render_buffer(&camera, [0.0, 0.0, 0.0], renderer::BufferType::UI);
renderer.flush(&mut device);
window.swap_buffers().unwrap();
device.cleanup();
// Reset ECS state after rendering
// After painting, we need to clear the v_buf
ui_v_buf.0.size = 0;
game_v_buf.0.size = 0;
terrain_v_buf.0.size = 0;
// Clear collision list for next frame
let mut collisions = world.write_resource::<Collisions>();
collisions.0.clear();
let mut killed = world.write_resource::<KilledEntities>();
killed.0.clear();
}
// Actually delete all entities that need to be deleted
world.maintain();
// Calculate frame time
let elapsed = start.elapsed();
if fps_count_timer <= 0 {
println!("Time taken (millis): {:?}",
elapsed.as_secs() * 1000 + elapsed.subsec_millis() as u64);
fps_count_timer = 60;
}
fps_count_timer -= 1;
// Sleep so we don't exceed 60fps. Vsync caps the frame rate while the window is
// being rendered, but once it isn't we would just spin and burn CPU without this.
if elapsed.subsec_millis() < 17 && elapsed.as_secs() == 0 {
thread::sleep(time::Duration::from_millis(17) - elapsed);
}
}
}
| {} | identifier_body |
mod.rs | use crate::cipher::Cipher;
use crate::error::*;
use crate::format::ossh_privkey::*;
use crate::format::ossh_pubkey::*;
use crate::format::parse_keystr;
use crate::format::pem::*;
use crate::format::pkcs8::*;
use digest::{Digest, FixedOutputReset};
use md5::Md5;
use openssl::pkey::{Id, PKey, PKeyRef, Private, Public};
use sha2::{Sha256, Sha512};
use std::fmt;
/// DSA key type
pub mod dsa;
/// EcDSA key type
pub mod ecdsa;
/// Ed25519 key type
pub mod ed25519;
/// RSA key type
pub mod rsa;
/// The name of the MD5 hashing algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const MD5_NAME: &str = "MD5";
/// The name of the sha2-256 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA256_NAME: &str = "SHA256";
/// The name of the sha2-512 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA512_NAME: &str = "SHA512";
/// An enum representing the hash function used to generate fingerprint
///
/// Used with [`PublicParts::fingerprint()`](trait.PublicParts.html#method.fingerprint) and
/// [`PublicParts::fingerprint_randomart()`](trait.PublicParts.html#method.fingerprint_randomart) to generate
/// different types of fingerprints and randomart pictures.
///
/// # Hash Algorithm
/// MD5: This is the default fingerprint type in older versions of openssh.
///
/// SHA2-256: Since OpenSSH 6.8, this became the default option of fingerprint.
///
/// SHA2-512: Although not documented, it can also be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FingerprintHash {
MD5,
SHA256,
SHA512,
}
impl FingerprintHash {
fn hash(self, data: &[u8]) -> Vec<u8> {
fn digest_hash<D>(hasher: &mut D, data: &[u8]) -> Vec<u8>
where
D: Digest + FixedOutputReset,
{
// Fix error[E0034]: multiple applicable items in scope
Digest::update(hasher, data);
hasher.finalize_reset().to_vec()
}
match self {
FingerprintHash::MD5 => digest_hash(&mut Md5::default(), data),
FingerprintHash::SHA256 => digest_hash(&mut Sha256::default(), data),
FingerprintHash::SHA512 => digest_hash(&mut Sha512::default(), data),
}
}
fn name(self) -> &'static str {
match self {
FingerprintHash::MD5 => MD5_NAME,
FingerprintHash::SHA256 => SHA256_NAME,
FingerprintHash::SHA512 => SHA512_NAME,
}
}
}
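// Editor's note: a small sanity-check sketch (not part of the original crate) of the
// digest sizes produced by each variant: 16 bytes for MD5, 32 for SHA-256 and 64 for
// SHA-512. It only exercises the private hash() helper defined in this module.
#[test]
fn fingerprint_hash_output_lengths() {
    let data = b"fingerprint me";
    assert_eq!(FingerprintHash::MD5.hash(data).len(), 16);
    assert_eq!(FingerprintHash::SHA256.hash(data).len(), 32);
    assert_eq!(FingerprintHash::SHA512.hash(data).len(), 64);
}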
/// An enum representing the type of key being stored
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum KeyType {
RSA,
DSA,
ECDSA,
ED25519,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub(crate) enum PublicKeyType {
RSA(rsa::RsaPublicKey),
DSA(dsa::DsaPublicKey),
ECDSA(ecdsa::EcDsaPublicKey),
ED25519(ed25519::Ed25519PublicKey),
}
#[allow(clippy::upper_case_acronyms)]
pub(crate) enum KeyPairType {
RSA(rsa::RsaKeyPair),
DSA(dsa::DsaKeyPair),
ECDSA(ecdsa::EcDsaKeyPair),
ED25519(ed25519::Ed25519KeyPair),
}
/// General public key type
///
/// This is a type to make it easy to store different types of public key in the container.
/// Each can contain one of the types supported in this crate.
///
/// Public key is usually stored in the `.pub` file when generating the key.
pub struct PublicKey {
pub(crate) key: PublicKeyType,
comment: String,
}
impl PublicKey {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Public>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaPublicKey::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaPublicKey::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaPublicKey::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519PublicKey::from_ossl_ed25519(&pkey.raw_public_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
/// Parse the openssh/PEM format public key file
pub fn from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
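// Editor's sketch of the read/write cycle documented on from_keystr()/serialize():
// serialize a public key to the one-line OpenSSH format and parse it back. It uses
// only APIs defined in this file (KeyPair::generate, clone_public_key, serialize,
// from_keystr, fingerprint); the test itself is not part of the original crate.
#[test]
fn public_key_openssh_roundtrip_sketch() {
    let keypair = KeyPair::generate(KeyType::ED25519, 256).unwrap();
    let pubkey = keypair.clone_public_key().unwrap();
    let line = pubkey.serialize().unwrap();
    let parsed = PublicKey::from_keystr(&line).unwrap();
    assert_eq!(parsed.keytype(), KeyType::ED25519);
    assert_eq!(
        parsed.fingerprint(FingerprintHash::SHA256).unwrap(),
        pubkey.fingerprint(FingerprintHash::SHA256).unwrap()
    );
}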
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn from(inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// A key pair is the so-called "private key", which contains both the public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supporting file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # Openssh
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the new format which is supported since OpenSSH 6.5, and it became the default format in OpenSSH 7.8.
/// The Ed25519 key can only be stored in this type.
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, then it will use the default size to generate the key
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
pub fn serialize_pem(&self, passphrase: Option<&str>) -> OsshResult<String> |
/// Serialize the keypair to the OpenSSL PKCS#8 PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PKCS#8 key will be encrypted.
pub fn serialize_pkcs8(&self, passphrase: Option<&str>) -> OsshResult<String> {
serialize_pkcs8_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSH private key format
///
/// If the passphrase is given (set to `Some(...)`) and cipher is not null,
/// then the generated private key will be encrypted.
pub fn serialize_openssh(
&self,
passphrase: Option<&str>,
cipher: Cipher,
) -> OsshResult<String> {
if let Some(passphrase) = passphrase {
Ok(serialize_ossh_privkey(self, passphrase, cipher, 0)?)
} else {
Ok(serialize_ossh_privkey(self, "", Cipher::Null, 0)?)
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Get the OpenSSH public key of the public parts
pub fn serialize_publickey(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Clone the public parts of the key pair
pub fn clone_public_key(&self) -> Result<PublicKey, Error> {
let key = match &self.key {
KeyPairType::RSA(key) => PublicKeyType::RSA(key.clone_public_key()?),
KeyPairType::DSA(key) => PublicKeyType::DSA(key.clone_public_key()?),
KeyPairType::ECDSA(key) => PublicKeyType::ECDSA(key.clone_public_key()?),
KeyPairType::ED25519(key) => PublicKeyType::ED25519(key.clone_public_key()?),
};
Ok(PublicKey {
key,
comment: self.comment.clone(),
})
}
fn inner_key(&self) -> &dyn PrivateParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
fn inner_key_pub(&self) -> &dyn PublicParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
}
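// Editor's sketch of the serialization paths described above: emit the key pair in
// the unencrypted OpenSSH private-key format (no passphrase, Cipher::Null) plus the
// matching public-key line. The "example@host" comment and the assumption that the
// comment is appended to the public-key line are ours, not from the original crate.
#[test]
fn keypair_serialize_openssh_sketch() {
    let mut keypair = KeyPair::generate(KeyType::ED25519, 0).unwrap();
    *keypair.comment_mut() = "example@host".to_string();
    let privkey = keypair.serialize_openssh(None, Cipher::Null).unwrap();
    assert!(privkey.starts_with("-----BEGIN OPENSSH PRIVATE KEY-----"));
    let pubkey_line = keypair.serialize_publickey().unwrap();
    assert!(pubkey_line.contains("example@host"));
}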
impl Key for KeyPair {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for KeyPair {
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key_pub().verify(data, sig)
}
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key_pub().blob()
}
}
impl PrivateParts for KeyPair {
fn sign(&self, data: &[u8]) -> Result<Vec<u8>, Error> {
self.inner_key().sign(data)
}
}
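// Editor's sketch of the sign/verify contract spelled out by PrivateParts and
// PublicParts: a detached signature produced by the private half should verify
// against the same data, both through the key pair and through its cloned public
// key. Only methods defined in this file are used; the test is illustrative.
#[test]
fn sign_verify_roundtrip_sketch() {
    let keypair = KeyPair::generate(KeyType::ED25519, 0).unwrap();
    let data = b"message to protect";
    let sig = keypair.sign(data).unwrap();
    assert!(keypair.verify(data, &sig).unwrap());
    let pubkey = keypair.clone_public_key().unwrap();
    assert!(pubkey.verify(data, &sig).unwrap());
}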
impl From<rsa::RsaKeyPair> for KeyPair {
fn from(inner: rsa::RsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaKeyPair> for KeyPair {
fn from(inner: dsa::DsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaKeyPair> for KeyPair {
fn from(inner: ecdsa::EcDsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519KeyPair> for KeyPair {
fn from(inner: ed25519::Ed25519KeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ED25519(inner),
comment: String::new(),
}
}
}
/// The basic trait of a key
pub trait Key {
/// The size in bits of the key
fn size(&self) -> usize;
/// The key name of the key
fn keyname(&self) -> &'static str;
/// The short key name of the key
fn short_keyname(&self) -> &'static str;
}
/// A trait for operations of a public key
pub trait PublicParts: Key {
/// Verify the data against a detached signature, returning `true` if the signature is valid for the data
fn verify(&self, data: &[u8], sig: &[u8]) -> OsshResult<bool>;
/// Return the binary representation of the public key
fn blob(&self) -> OsshResult<Vec<u8>>;
/// Hash the blob of the public key to generate the fingerprint
fn fingerprint(&self, hash: FingerprintHash) -> OsshResult<Vec<u8>> {
let b = self.blob()?;
Ok(hash.hash(&b))
}
// Rewritten from the OpenSSH project. OpenBSD notice is included below.
/* $OpenBSD: sshkey.c,v 1.120 2022/01/06 22:05:42 djm Exp $ */
/*
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
* Copyright (c) 2008 Alexander von Gernler. All rights reserved.
* Copyright (c) 2010,2011 Damien Miller. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/// Draw an ASCII-art picture from the fingerprint, also known as "randomart"
fn fingerprint_randomart(&self, hash: FingerprintHash) -> OsshResult<String> {
const FLDBASE: usize = 8;
const FLDSIZE_Y: usize = FLDBASE + 1;
const FLDSIZE_X: usize = FLDBASE * 2 + 1;
// Chars to be used after each other every time the worm intersects with itself. Matter of
// taste.
const AUGMENTATION_CHARS: &[u8] = b" .o+=*BOX@%&#/^SE";
let len = AUGMENTATION_CHARS.len() - 1;
let mut art = String::with_capacity((FLDSIZE_X + 3) * (FLDSIZE_Y + 2));
// Initialize field.
let mut field = [[0; FLDSIZE_X]; FLDSIZE_Y];
let mut x = FLDSIZE_X / 2;
let mut y = FLDSIZE_Y / 2;
// Process raw key.
let dgst_raw = self.fingerprint(hash)?;
for mut input in dgst_raw.iter().copied() {
// Each byte conveys four 2-bit move commands.
for _ in 0..4 {
// Evaluate 2 bit, rest is shifted later.
x = if (input & 0x1) != 0 {
x + 1
} else {
x.saturating_sub(1)
};
y = if (input & 0x2) != 0 {
y + 1
} else {
y.saturating_sub(1)
};
// Assure we are still in bounds.
x = x.min(FLDSIZE_X - 1);
y = y.min(FLDSIZE_Y - 1);
// Augment the field.
if field[y][x] < len as u8 - 2 {
field[y][x] += 1;
}
input >>= 2;
}
}
// Mark starting point and end point.
field[FLDSIZE_Y / 2][FLDSIZE_X / 2] = len as u8 - 1;
field[y][x] = len as u8;
// Assemble title.
let title = format!("[{} {}]", self.short_keyname(), self.size());
// If [type size] won't fit, then try [type]; fits "[ED25519-CERT]".
let title = if title.chars().count() > FLDSIZE_X {
format!("[{}]", self.short_keyname())
} else {
title
};
// Assemble hash ID.
let hash = format!("[{}]", hash.name());
// Output upper border.
art += &format!("+{:-^width$}+\n", title, width = FLDSIZE_X);
// Output content.
#[allow(clippy::needless_range_loop)]
for y in 0..FLDSIZE_Y {
art.push('|');
art.extend(
field[y]
.iter()
.map(|&c| AUGMENTATION_CHARS[c as usize] as char),
);
art += "|\n";
}
// Output lower border.
art += &format!("+{:-^width$}+", hash, width = FLDSIZE_X);
Ok(art)
}
}
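// Editor's sketch for the randomart routine above: the picture is an ASCII frame
// whose first line carries the "[<type> <bits>]" title and whose last line carries
// the "[<hash>]" label, so both border lines start with '+'. Purely illustrative
// and not part of the original test suite.
#[test]
fn randomart_has_bordered_frame() {
    let keypair = KeyPair::generate(KeyType::ED25519, 0).unwrap();
    let art = keypair
        .clone_public_key()
        .unwrap()
        .fingerprint_randomart(FingerprintHash::SHA256)
        .unwrap();
    let lines: Vec<&str> = art.lines().collect();
    assert!(lines.first().unwrap().starts_with('+'));
    assert!(lines.last().unwrap().starts_with('+'));
}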
/// A trait for operations of a private key
pub trait PrivateParts: Key {
/// Sign the data with the key, returning the "detached" signature
fn sign(&self, data: &[u8]) -> OsshResult<Vec<u8>>;
}
// This test is used to print the struct size of [`PublicKey`] and [`KeyPair`].
// It is intended to be run manually, and the result is read by the developers.
#[test]
#[ignore]
fn test_size() {
use std::mem::size_of;
eprintln!("PublicKey: {} bytes", size_of::<PublicKey>());
eprintln!("\tRSA: {} bytes", size_of::<rsa::RsaPublicKey>());
eprintln!("\tDSA: {} bytes", size_of::<dsa::DsaPublicKey>());
eprintln!("\tECDSA: {} bytes", size_of::<ecdsa::EcDsaPublicKey>());
eprintln!(
"\tED25519: {} bytes",
size_of::<ed25519::Ed25519PublicKey>()
);
eprintln!("KeyPair: {} bytes", size_of::<KeyPair>());
eprintln!("\tRSA: {} bytes", size_of::<rsa::RsaKeyPair>());
eprintln!("\tDSA: {} bytes", size_of::<dsa::DsaKeyPair>());
eprintln!("\tECDSA: {} bytes", size_of::<ecdsa::EcDsaKeyPair>());
eprintln!("\tED25519: {} bytes", size_of::<ed25519::Ed25519KeyPair>());
}
| {
stringify_pem_privkey(self, passphrase)
} | identifier_body |
mod.rs | use crate::cipher::Cipher;
use crate::error::*;
use crate::format::ossh_privkey::*;
use crate::format::ossh_pubkey::*;
use crate::format::parse_keystr;
use crate::format::pem::*;
use crate::format::pkcs8::*;
use digest::{Digest, FixedOutputReset};
use md5::Md5;
use openssl::pkey::{Id, PKey, PKeyRef, Private, Public};
use sha2::{Sha256, Sha512};
use std::fmt;
/// DSA key type
pub mod dsa;
/// EcDSA key type
pub mod ecdsa;
/// Ed25519 key type
pub mod ed25519;
/// RSA key type
pub mod rsa;
/// The name of the MD5 hashing algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const MD5_NAME: &str = "MD5";
/// The name of the sha2-256 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA256_NAME: &str = "SHA256";
/// The name of the sha2-512 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA512_NAME: &str = "SHA512";
/// An enum representing the hash function used to generate fingerprint
///
/// Used with [`PublicParts::fingerprint()`](trait.PublicParts.html#method.fingerprint) and
/// [`PublicParts::fingerprint_randomart()`](trait.PublicParts.html#method.fingerprint_randomart) to generate
/// different types of fingerprints and randomart pictures.
///
/// # Hash Algorithm
/// MD5: This is the default fingerprint type in older versions of openssh.
///
/// SHA2-256: Since OpenSSH 6.8, this became the default option of fingerprint.
///
/// SHA2-512: Although not documented, it can also be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FingerprintHash {
MD5, | SHA512,
}
impl FingerprintHash {
fn hash(self, data: &[u8]) -> Vec<u8> {
fn digest_hash<D>(hasher: &mut D, data: &[u8]) -> Vec<u8>
where
D: Digest + FixedOutputReset,
{
// Fix error[E0034]: multiple applicable items in scope
Digest::update(hasher, data);
hasher.finalize_reset().to_vec()
}
match self {
FingerprintHash::MD5 => digest_hash(&mut Md5::default(), data),
FingerprintHash::SHA256 => digest_hash(&mut Sha256::default(), data),
FingerprintHash::SHA512 => digest_hash(&mut Sha512::default(), data),
}
}
fn name(self) -> &'static str {
match self {
FingerprintHash::MD5 => MD5_NAME,
FingerprintHash::SHA256 => SHA256_NAME,
FingerprintHash::SHA512 => SHA512_NAME,
}
}
}
/// An enum representing the type of key being stored
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum KeyType {
RSA,
DSA,
ECDSA,
ED25519,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub(crate) enum PublicKeyType {
RSA(rsa::RsaPublicKey),
DSA(dsa::DsaPublicKey),
ECDSA(ecdsa::EcDsaPublicKey),
ED25519(ed25519::Ed25519PublicKey),
}
#[allow(clippy::upper_case_acronyms)]
pub(crate) enum KeyPairType {
RSA(rsa::RsaKeyPair),
DSA(dsa::DsaKeyPair),
ECDSA(ecdsa::EcDsaKeyPair),
ED25519(ed25519::Ed25519KeyPair),
}
/// General public key type
///
/// This is a type to make it easy to store different types of public key in the container.
/// Each can contain one of the types supported in this crate.
///
/// Public key is usually stored in the `.pub` file when generating the key.
pub struct PublicKey {
pub(crate) key: PublicKeyType,
comment: String,
}
impl PublicKey {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Public>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaPublicKey::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaPublicKey::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaPublicKey::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519PublicKey::from_ossl_ed25519(&pkey.raw_public_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
/// Parse the openssh/PEM format public key file
pub fn from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
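// --- Hedged usage sketch (not part of the original crate) ---
// Parsing an OpenSSH-format `.pub` line and hashing its blob into a
// fingerprint plus randomart. The key literal is a placeholder assumption;
// a real key string is needed for `from_keystr` to succeed.
#[allow(dead_code)]
fn example_public_key_fingerprint() -> OsshResult<()> {
    let line = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI<placeholder> user@example";
    let key = PublicKey::from_keystr(line)?;
    let fp = key.fingerprint(FingerprintHash::SHA256)?;
    eprintln!("{} fingerprint: {} bytes", FingerprintHash::SHA256.name(), fp.len());
    eprintln!("{}", key.fingerprint_randomart(FingerprintHash::SHA256)?);
    Ok(())
}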
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn from(inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// A key pair is the so-called "private key", which contains both the public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supporting file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # Openssh
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the new format which is supported since OpenSSH 6.5, and it became the default format in OpenSSH 7.8.
/// The Ed25519 key can only be stored in this type.
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, then it will use the default size to generate the key
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
pub fn serialize_pem(&self, passphrase: Option<&str>) -> OsshResult<String> {
stringify_pem_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSL PKCS#8 PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PKCS#8 key will be encrypted.
pub fn serialize_pkcs8(&self, passphrase: Option<&str>) -> OsshResult<String> {
serialize_pkcs8_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSH private key format
///
/// If the passphrase is given (set to `Some(...)`) and cipher is not null,
/// then the generated private key will be encrypted.
pub fn serialize_openssh(
&self,
passphrase: Option<&str>,
cipher: Cipher,
) -> OsshResult<String> {
if let Some(passphrase) = passphrase {
Ok(serialize_ossh_privkey(self, passphrase, cipher, 0)?)
} else {
Ok(serialize_ossh_privkey(self, "", Cipher::Null, 0)?)
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Get the OpenSSH public key of the public parts
pub fn serialize_publickey(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Clone the public parts of the key pair
pub fn clone_public_key(&self) -> Result<PublicKey, Error> {
let key = match &self.key {
KeyPairType::RSA(key) => PublicKeyType::RSA(key.clone_public_key()?),
KeyPairType::DSA(key) => PublicKeyType::DSA(key.clone_public_key()?),
KeyPairType::ECDSA(key) => PublicKeyType::ECDSA(key.clone_public_key()?),
KeyPairType::ED25519(key) => PublicKeyType::ED25519(key.clone_public_key()?),
};
Ok(PublicKey {
key,
comment: self.comment.clone(),
})
}
fn inner_key(&self) -> &dyn PrivateParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
fn inner_key_pub(&self) -> &dyn PublicParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
}
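// --- Hedged usage sketch (not part of the original crate) ---
// Generating a fresh Ed25519 key pair, emitting the unencrypted OpenSSH
// private-key block, and deriving the matching public-key line.
// `Cipher::Null` mirrors the no-passphrase branch of `serialize_openssh`.
#[allow(dead_code)]
fn example_generate_and_serialize() -> OsshResult<()> {
    let mut pair = KeyPair::generate(KeyType::ED25519, 256)?;
    *pair.comment_mut() = String::from("user@example");
    let private_block = pair.serialize_openssh(None, Cipher::Null)?;
    let public_line = pair.serialize_publickey()?;
    eprintln!("{}\n{}", private_block, public_line);
    Ok(())
}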
impl Key for KeyPair {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for KeyPair {
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key_pub().verify(data, sig)
}
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key_pub().blob()
}
}
impl PrivateParts for KeyPair {
fn sign(&self, data: &[u8]) -> Result<Vec<u8>, Error> {
self.inner_key().sign(data)
}
}
impl From<rsa::RsaKeyPair> for KeyPair {
fn from(inner: rsa::RsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaKeyPair> for KeyPair {
fn from(inner: dsa::DsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaKeyPair> for KeyPair {
fn from(inner: ecdsa::EcDsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519KeyPair> for KeyPair {
fn from(inner: ed25519::Ed25519KeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ED25519(inner),
comment: String::new(),
}
}
}
/// The basic trait of a key
pub trait Key {
/// The size in bits of the key
fn size(&self) -> usize;
/// The key name of the key
fn keyname(&self) -> &'static str;
/// The short key name of the key
fn short_keyname(&self) -> &'static str;
}
/// A trait for operations of a public key
pub trait PublicParts: Key {
/// Verify the data with a detached signature, returning true if the signature is not malformed
fn verify(&self, data: &[u8], sig: &[u8]) -> OsshResult<bool>;
/// Return the binary representation of the public key
fn blob(&self) -> OsshResult<Vec<u8>>;
/// Hash the blob of the public key to generate the fingerprint
fn fingerprint(&self, hash: FingerprintHash) -> OsshResult<Vec<u8>> {
let b = self.blob()?;
Ok(hash.hash(&b))
}
// Rewritten from the OpenSSH project. OpenBSD notice is included below.
/* $OpenBSD: sshkey.c,v 1.120 2022/01/06 22:05:42 djm Exp $ */
/*
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
* Copyright (c) 2008 Alexander von Gernler. All rights reserved.
* Copyright (c) 2010,2011 Damien Miller. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/// Draw an ASCII-art picture from the fingerprint, also known as "randomart"
fn fingerprint_randomart(&self, hash: FingerprintHash) -> OsshResult<String> {
const FLDBASE: usize = 8;
const FLDSIZE_Y: usize = FLDBASE + 1;
const FLDSIZE_X: usize = FLDBASE * 2 + 1;
// Chars to be used after each other every time the worm intersects with itself. Matter of
// taste.
const AUGMENTATION_CHARS: &[u8] = b" .o+=*BOX@%&#/^SE";
let len = AUGMENTATION_CHARS.len() - 1;
let mut art = String::with_capacity((FLDSIZE_X + 3) * (FLDSIZE_Y + 2));
// Initialize field.
let mut field = [[0; FLDSIZE_X]; FLDSIZE_Y];
let mut x = FLDSIZE_X / 2;
let mut y = FLDSIZE_Y / 2;
// Process raw key.
let dgst_raw = self.fingerprint(hash)?;
for mut input in dgst_raw.iter().copied() {
// Each byte conveys four 2-bit move commands.
for _ in 0..4 {
// Evaluate 2 bit, rest is shifted later.
x = if (input & 0x1) != 0 {
x + 1
} else {
x.saturating_sub(1)
};
y = if (input & 0x2) != 0 {
y + 1
} else {
y.saturating_sub(1)
};
// Assure we are still in bounds.
x = x.min(FLDSIZE_X - 1);
y = y.min(FLDSIZE_Y - 1);
// Augment the field.
if field[y][x] < len as u8 - 2 {
field[y][x] += 1;
}
input >>= 2;
}
}
// Mark starting point and end point.
field[FLDSIZE_Y / 2][FLDSIZE_X / 2] = len as u8 - 1;
field[y][x] = len as u8;
// Assemble title.
let title = format!("[{} {}]", self.short_keyname(), self.size());
// If [type size] won't fit, then try [type]; fits "[ED25519-CERT]".
let title = if title.chars().count() > FLDSIZE_X {
format!("[{}]", self.short_keyname())
} else {
title
};
// Assemble hash ID.
let hash = format!("[{}]", hash.name());
// Output upper border.
art += &format!("+{:-^width$}+\n", title, width = FLDSIZE_X);
// Output content.
#[allow(clippy::needless_range_loop)]
for y in 0..FLDSIZE_Y {
art.push('|');
art.extend(
field[y]
.iter()
.map(|&c| AUGMENTATION_CHARS[c as usize] as char),
);
art += "|\n";
}
// Output lower border.
art += &format!("+{:-^width$}+", hash, width = FLDSIZE_X);
Ok(art)
}
}
/// A trait for operations of a private key
pub trait PrivateParts: Key {
/// Sign the data with the key, returning the "detached" signature
fn sign(&self, data: &[u8]) -> OsshResult<Vec<u8>>;
}
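// --- Hedged usage sketch (not part of the original crate) ---
// A sign/verify round trip: the key pair produces a detached signature and
// the cloned public half checks it, exercising both traits defined above.
#[allow(dead_code)]
fn example_sign_and_verify() -> OsshResult<()> {
    let pair = KeyPair::generate(KeyType::ECDSA, 256)?;
    let data = b"example payload";
    let sig = pair.sign(data)?;
    let public = pair.clone_public_key()?;
    assert!(public.verify(data, &sig)?);
    Ok(())
}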
// This test is used to print the struct size of [`PublicKey`] and [`KeyPair`].
// It is intended to be run manually, and the result is read by the developers.
#[test]
#[ignore]
fn test_size() {
use std::mem::size_of;
eprintln!("PublicKey: {} bytes", size_of::<PublicKey>());
eprintln!("\tRSA: {} bytes", size_of::<rsa::RsaPublicKey>());
eprintln!("\tDSA: {} bytes", size_of::<dsa::DsaPublicKey>());
eprintln!("\tECDSA: {} bytes", size_of::<ecdsa::EcDsaPublicKey>());
eprintln!(
"\tED25519: {} bytes",
size_of::<ed25519::Ed25519PublicKey>()
);
eprintln!("KeyPair: {} bytes", size_of::<KeyPair>());
eprintln!("\tRSA: {} bytes", size_of::<rsa::RsaKeyPair>());
eprintln!("\tDSA: {} bytes", size_of::<dsa::DsaKeyPair>());
eprintln!("\tECDSA: {} bytes", size_of::<ecdsa::EcDsaKeyPair>());
eprintln!("\tED25519: {} bytes", size_of::<ed25519::Ed25519KeyPair>());
} | SHA256, | random_line_split |
mod.rs | use crate::cipher::Cipher;
use crate::error::*;
use crate::format::ossh_privkey::*;
use crate::format::ossh_pubkey::*;
use crate::format::parse_keystr;
use crate::format::pem::*;
use crate::format::pkcs8::*;
use digest::{Digest, FixedOutputReset};
use md5::Md5;
use openssl::pkey::{Id, PKey, PKeyRef, Private, Public};
use sha2::{Sha256, Sha512};
use std::fmt;
/// DSA key type
pub mod dsa;
/// EcDSA key type
pub mod ecdsa;
/// Ed25519 key type
pub mod ed25519;
/// RSA key type
pub mod rsa;
/// The name of the MD5 hashing algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const MD5_NAME: &str = "MD5";
/// The name of the sha2-256 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA256_NAME: &str = "SHA256";
/// The name of the sha2-512 algorithm returned by [`FingerprintHash::name()`](enum.FingerprintHash.html#method.name)
pub const SHA512_NAME: &str = "SHA512";
/// An enum representing the hash function used to generate fingerprint
///
/// Used with [`PublicParts::fingerprint()`](trait.PublicParts.html#method.fingerprint) and
/// [`PublicParts::fingerprint_randomart()`](trait.PublicParts.html#method.fingerprint_randomart) to generate
/// different types of fingerprints and randomarts.
///
/// # Hash Algorithm
/// MD5: This is the default fingerprint type in older versions of openssh.
///
/// SHA2-256: Since OpenSSH 6.8, this has been the default fingerprint type.
///
/// SHA2-512: Although not documented, it can also be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FingerprintHash {
MD5,
SHA256,
SHA512,
}
impl FingerprintHash {
fn hash(self, data: &[u8]) -> Vec<u8> {
fn digest_hash<D>(hasher: &mut D, data: &[u8]) -> Vec<u8>
where
D: Digest + FixedOutputReset,
{
// Fix error[E0034]: multiple applicable items in scope
Digest::update(hasher, data);
hasher.finalize_reset().to_vec()
}
match self {
FingerprintHash::MD5 => digest_hash(&mut Md5::default(), data),
FingerprintHash::SHA256 => digest_hash(&mut Sha256::default(), data),
FingerprintHash::SHA512 => digest_hash(&mut Sha512::default(), data),
}
}
fn name(self) -> &'static str {
match self {
FingerprintHash::MD5 => MD5_NAME,
FingerprintHash::SHA256 => SHA256_NAME,
FingerprintHash::SHA512 => SHA512_NAME,
}
}
}
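// Hedged note (not part of the original crate): the three variants produce
// digests of 16, 32, and 64 bytes respectively, so for example
// `FingerprintHash::SHA256.hash(b"data").len() == 32`.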
/// An enum representing the type of key being stored
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum KeyType {
RSA,
DSA,
ECDSA,
ED25519,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub(crate) enum PublicKeyType {
RSA(rsa::RsaPublicKey),
DSA(dsa::DsaPublicKey),
ECDSA(ecdsa::EcDsaPublicKey),
ED25519(ed25519::Ed25519PublicKey),
}
#[allow(clippy::upper_case_acronyms)]
pub(crate) enum KeyPairType {
RSA(rsa::RsaKeyPair),
DSA(dsa::DsaKeyPair),
ECDSA(ecdsa::EcDsaKeyPair),
ED25519(ed25519::Ed25519KeyPair),
}
/// General public key type
///
/// This is a type to make it easy to store different types of public key in the container.
/// Each can contain one of the types supported in this crate.
///
/// Public key is usually stored in the `.pub` file when generating the key.
pub struct PublicKey {
pub(crate) key: PublicKeyType,
comment: String,
}
impl PublicKey {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Public>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaPublicKey::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaPublicKey::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaPublicKey::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519PublicKey::from_ossl_ed25519(&pkey.raw_public_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
/// Parse the openssh/PEM format public key file
pub fn from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn | (inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// A key pair is the so-called "private key", which contains both the public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supporting file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # Openssh
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the new format which is supported since OpenSSH 6.5, and it became the default format in OpenSSH 7.8.
/// The Ed25519 key can only be stored in this type.
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, then it will use the default size to generate the key
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
pub fn serialize_pem(&self, passphrase: Option<&str>) -> OsshResult<String> {
stringify_pem_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSL PKCS#8 PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PKCS#8 key will be encrypted.
pub fn serialize_pkcs8(&self, passphrase: Option<&str>) -> OsshResult<String> {
serialize_pkcs8_privkey(self, passphrase)
}
/// Serialize the keypair to the OpenSSH private key format
///
/// If the passphrase is given (set to `Some(...)`) and cipher is not null,
/// then the generated private key will be encrypted.
pub fn serialize_openssh(
&self,
passphrase: Option<&str>,
cipher: Cipher,
) -> OsshResult<String> {
if let Some(passphrase) = passphrase {
Ok(serialize_ossh_privkey(self, passphrase, cipher, 0)?)
} else {
Ok(serialize_ossh_privkey(self, "", Cipher::Null, 0)?)
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Get the OpenSSH public key of the public parts
pub fn serialize_publickey(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Clone the public parts of the key pair
pub fn clone_public_key(&self) -> Result<PublicKey, Error> {
let key = match &self.key {
KeyPairType::RSA(key) => PublicKeyType::RSA(key.clone_public_key()?),
KeyPairType::DSA(key) => PublicKeyType::DSA(key.clone_public_key()?),
KeyPairType::ECDSA(key) => PublicKeyType::ECDSA(key.clone_public_key()?),
KeyPairType::ED25519(key) => PublicKeyType::ED25519(key.clone_public_key()?),
};
Ok(PublicKey {
key,
comment: self.comment.clone(),
})
}
fn inner_key(&self) -> &dyn PrivateParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
fn inner_key_pub(&self) -> &dyn PublicParts {
match &self.key {
KeyPairType::RSA(key) => key,
KeyPairType::DSA(key) => key,
KeyPairType::ECDSA(key) => key,
KeyPairType::ED25519(key) => key,
}
}
}
impl Key for KeyPair {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for KeyPair {
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key_pub().verify(data, sig)
}
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key_pub().blob()
}
}
impl PrivateParts for KeyPair {
fn sign(&self, data: &[u8]) -> Result<Vec<u8>, Error> {
self.inner_key().sign(data)
}
}
impl From<rsa::RsaKeyPair> for KeyPair {
fn from(inner: rsa::RsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaKeyPair> for KeyPair {
fn from(inner: dsa::DsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaKeyPair> for KeyPair {
fn from(inner: ecdsa::EcDsaKeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519KeyPair> for KeyPair {
fn from(inner: ed25519::Ed25519KeyPair) -> KeyPair {
KeyPair {
key: KeyPairType::ED25519(inner),
comment: String::new(),
}
}
}
/// The basic trait of a key
pub trait Key {
/// The size in bits of the key
fn size(&self) -> usize;
/// The key name of the key
fn keyname(&self) -> &'static str;
/// The short key name of the key
fn short_keyname(&self) -> &'static str;
}
/// A trait for operations of a public key
pub trait PublicParts: Key {
/// Verify the data with a detached signature, returning true if the signature is not malformed
fn verify(&self, data: &[u8], sig: &[u8]) -> OsshResult<bool>;
/// Return the binary representation of the public key
fn blob(&self) -> OsshResult<Vec<u8>>;
/// Hash the blob of the public key to generate the fingerprint
fn fingerprint(&self, hash: FingerprintHash) -> OsshResult<Vec<u8>> {
let b = self.blob()?;
Ok(hash.hash(&b))
}
// Rewritten from the OpenSSH project. OpenBSD notice is included below.
/* $OpenBSD: sshkey.c,v 1.120 2022/01/06 22:05:42 djm Exp $ */
/*
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
* Copyright (c) 2008 Alexander von Gernler. All rights reserved.
* Copyright (c) 2010,2011 Damien Miller. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/// Draw an ASCII-art picture from the fingerprint, also known as "randomart"
fn fingerprint_randomart(&self, hash: FingerprintHash) -> OsshResult<String> {
const FLDBASE: usize = 8;
const FLDSIZE_Y: usize = FLDBASE + 1;
const FLDSIZE_X: usize = FLDBASE * 2 + 1;
// Chars to be used after each other every time the worm intersects with itself. Matter of
// taste.
const AUGMENTATION_CHARS: &[u8] = b" .o+=*BOX@%&#/^SE";
let len = AUGMENTATION_CHARS.len() - 1;
let mut art = String::with_capacity((FLDSIZE_X + 3) * (FLDSIZE_Y + 2));
// Initialize field.
let mut field = [[0; FLDSIZE_X]; FLDSIZE_Y];
let mut x = FLDSIZE_X / 2;
let mut y = FLDSIZE_Y / 2;
// Process raw key.
let dgst_raw = self.fingerprint(hash)?;
for mut input in dgst_raw.iter().copied() {
// Each byte conveys four 2-bit move commands.
for _ in 0..4 {
// Evaluate 2 bit, rest is shifted later.
x = if (input & 0x1) != 0 {
x + 1
} else {
x.saturating_sub(1)
};
y = if (input & 0x2) != 0 {
y + 1
} else {
y.saturating_sub(1)
};
// Assure we are still in bounds.
x = x.min(FLDSIZE_X - 1);
y = y.min(FLDSIZE_Y - 1);
// Augment the field.
if field[y][x] < len as u8 - 2 {
field[y][x] += 1;
}
input >>= 2;
}
}
// Mark starting point and end point.
field[FLDSIZE_Y / 2][FLDSIZE_X / 2] = len as u8 - 1;
field[y][x] = len as u8;
// Assemble title.
let title = format!("[{} {}]", self.short_keyname(), self.size());
// If [type size] won't fit, then try [type]; fits "[ED25519-CERT]".
let title = if title.chars().count() > FLDSIZE_X {
format!("[{}]", self.short_keyname())
} else {
title
};
// Assemble hash ID.
let hash = format!("[{}]", hash.name());
// Output upper border.
art += &format!("+{:-^width$}+\n", title, width = FLDSIZE_X);
// Output content.
#[allow(clippy::needless_range_loop)]
for y in 0..FLDSIZE_Y {
art.push('|');
art.extend(
field[y]
.iter()
.map(|&c| AUGMENTATION_CHARS[c as usize] as char),
);
art += "|\n";
}
// Output lower border.
art += &format!("+{:-^width$}+", hash, width = FLDSIZE_X);
Ok(art)
}
}
/// A trait for operations of a private key
pub trait PrivateParts: Key {
/// Sign the data with the key, returning the "detached" signature
fn sign(&self, data: &[u8]) -> OsshResult<Vec<u8>>;
}
// This test is used to print the struct size of [`PublicKey`] and [`KeyPair`].
// It is intended to be run manually, and the result is read by the developers.
#[test]
#[ignore]
fn test_size() {
use std::mem::size_of;
eprintln!("PublicKey: {} bytes", size_of::<PublicKey>());
eprintln!("\tRSA: {} bytes", size_of::<rsa::RsaPublicKey>());
eprintln!("\tDSA: {} bytes", size_of::<dsa::DsaPublicKey>());
eprintln!("\tECDSA: {} bytes", size_of::<ecdsa::EcDsaPublicKey>());
eprintln!(
"\tED25519: {} bytes",
size_of::<ed25519::Ed25519PublicKey>()
);
eprintln!("KeyPair: {} bytes", size_of::<KeyPair>());
eprintln!("\tRSA: {} bytes", size_of::<rsa::RsaKeyPair>());
eprintln!("\tDSA: {} bytes", size_of::<dsa::DsaKeyPair>());
eprintln!("\tECDSA: {} bytes", size_of::<ecdsa::EcDsaKeyPair>());
eprintln!("\tED25519: {} bytes", size_of::<ed25519::Ed25519KeyPair>());
}
| from | identifier_name |
mod.rs | //! Text handling.
mod deco;
mod misc;
mod quotes;
mod raw;
mod shaping;
mod shift;
pub use self::deco::*;
pub use self::misc::*;
pub use self::quotes::*;
pub use self::raw::*;
pub use self::shaping::*;
pub use self::shift::*;
use rustybuzz::Tag;
use ttf_parser::Rect;
use typst::font::{Font, FontStretch, FontStyle, FontWeight, VerticalFontMetric};
use crate::layout::ParElem;
use crate::prelude::*;
/// Hook up all text definitions.
pub(super) fn define(global: &mut Scope) {
global.define("text", TextElem::func());
global.define("linebreak", LinebreakElem::func());
global.define("smartquote", SmartQuoteElem::func());
global.define("strong", StrongElem::func());
global.define("emph", EmphElem::func());
global.define("lower", lower_func());
global.define("upper", upper_func());
global.define("smallcaps", smallcaps_func());
global.define("sub", SubElem::func());
global.define("super", SuperElem::func());
global.define("underline", UnderlineElem::func());
global.define("strike", StrikeElem::func());
global.define("overline", OverlineElem::func());
global.define("raw", RawElem::func());
global.define("lorem", lorem_func());
}
/// Customizes the look and layout of text in a variety of ways.
///
/// This function is used frequently, both with set rules and directly. While
/// the set rule is often the simpler choice, calling the `text` function
/// directly can be useful when passing text as an argument to another function.
///
/// ## Example { #example }
/// ```example
/// #set text(18pt)
/// With a set rule.
///
/// #emph(text(blue)[
/// With a function call.
/// ])
/// ```
///
/// Display: Text
/// Category: text
#[element(Construct, PlainText)]
pub struct TextElem {
/// A prioritized sequence of font families.
///
/// When processing text, Typst tries all specified font families in order
/// until it finds a font that has the necessary glyphs. In the example
/// below, the font `Inria Serif` is preferred, but since it does not
/// contain Arabic glyphs, the Arabic text uses `Noto Sans Arabic` instead.
///
/// ```example
/// #set text(font: (
/// "Inria Serif",
/// "Noto Sans Arabic",
/// ))
///
/// This is Latin. \
/// هذا عربي.
///
/// ```
#[default(FontList(vec![FontFamily::new("Linux Libertine")]))]
pub font: FontList,
/// Whether to allow last resort font fallback when the primary font list
/// contains no match. This lets Typst search through all available fonts
/// for the most similar one that has the necessary glyphs.
///
/// _Note:_ Currently, there are no warnings when fallback is disabled and
/// no glyphs are found. Instead, your text shows up in the form of "tofus":
/// Small boxes that indicate the lack of an appropriate glyph. In the
/// future, you will be able to instruct Typst to issue warnings so you know
/// something is up.
///
/// ```example
/// #set text(font: "Inria Serif")
/// هذا عربي
///
/// #set text(fallback: false)
/// هذا عربي
/// ```
#[default(true)]
pub fallback: bool,
/// The desired font style.
///
/// When an italic style is requested and only an oblique one is available,
/// it is used. Similarly, the other way around, an italic style can stand
/// in for an oblique one. When neither an italic nor an oblique style is
/// available, Typst selects the normal style. Since most fonts are only
/// available either in an italic or oblique style, the difference between
/// italic and oblique style is rarely observable.
///
/// If you want to emphasize your text, you should do so using the
/// [emph]($func/emph) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the emphasis.
///
/// ```example
/// #text(font: "Linux Libertine", style: "italic")[Italic]
/// #text(font: "DejaVu Sans", style: "oblique")[Oblique]
/// ```
pub style: FontStyle,
/// The desired thickness of the font's glyphs. Accepts an integer between
/// `{100}` and `{900}` or one of the predefined weight names. When the
/// desired weight is not available, Typst selects the font from the family
/// that is closest in weight.
///
/// If you want to strongly emphasize your text, you should do so using the
/// [strong]($func/strong) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the strong
/// emphasis.
///
/// ```example
/// #set text(font: "IBM Plex Sans")
///
/// #text(weight: "light")[Light] \
/// #text(weight: "regular")[Regular] \
/// #text(weight: "medium")[Medium] \
/// #text(weight: 500)[Medium] \
/// #text(weight: "bold")[Bold]
/// ```
pub weight: FontWeight,
/// The desired width of the glyphs. Accepts a ratio between `{50%}` and
/// `{200%}`. When the desired width is not available, Typst selects the
/// font from the family that is closest in stretch. This will only stretch
/// the text if a condensed or expanded version of the font is available.
///
/// If you want to adjust the amount of space between characters instead of
/// stretching the glyphs themselves, use the [`tracking`]($func/text.tracking)
/// property instead.
///
/// ```example
/// #text(stretch: 75%)[Condensed] \
/// #text(stretch: 100%)[Normal]
/// ```
pub stretch: FontStretch,
/// The size of the glyphs. This value forms the basis of the `em` unit:
/// `{1em}` is equivalent to the font size.
///
/// You can also give the font size itself in `em` units. Then, it is
/// relative to the previous font size.
///
/// ```example
/// #set text(size: 20pt)
/// very #text(1.5em)[big] text
/// ```
#[parse(args.named_or_find("size")?)]
#[fold]
#[default(Abs::pt(11.0))]
pub size: TextSize,
/// The glyph fill color.
///
/// ```example
/// #set text(fill: red)
/// This text is red.
/// ```
#[parse(args.named_or_find("fill")?)]
#[default(Color::BLACK.into())]
pub fill: Paint,
/// The amount of space that should be added between characters.
///
/// ```example
/// #set text(tracking: 1.5pt)
/// Distant text.
/// ```
#[resolve]
pub tracking: Length,
/// The amount of space between words.
///
/// Can be given as an absolute length, but also relative to the width of
/// the space character in the font.
///
/// If you want to adjust the amount of space between characters rather than
/// words, use the [`tracking`]($func/text.tracking) property instead.
///
/// ```example
/// #set text(spacing: 200%)
/// Text with distant words.
/// ```
#[resolve]
#[default(Rel::one())]
pub spacing: Rel<Length>,
/// An amount to shift the text baseline by.
///
/// ```example
/// A #text(baseline: 3pt)[lowered]
/// word.
/// ```
#[resolve]
pub baseline: Length,
/// Whether certain glyphs can hang over into the margin in justified text.
/// This can make justification visually more pleasing.
///
/// ```example
/// #set par(justify: true)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
///
/// #set text(overhang: false)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
/// ```
#[default(true)]
pub overhang: bool,
/// The top end of the conceptual frame around the text used for layout and
/// positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(top-edge: "ascender")
/// #rect(fill: aqua)[Typst]
///
/// #set text(top-edge: "cap-height")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(TopEdge::Metric(TopEdgeMetric::CapHeight))]
pub top_edge: TopEdge,
/// The bottom end of the conceptual frame around the text used for layout
/// and positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(bottom-edge: "baseline")
/// #rect(fill: aqua)[Typst]
///
/// #set text(bottom-edge: "descender")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(BottomEdge::Metric(BottomEdgeMetric::Baseline))]
pub bottom_edge: BottomEdge,
/// An [ISO 639-1/2/3 language code.](https://en.wikipedia.org/wiki/ISO_639)
///
/// Setting the correct language affects various parts of Typst:
///
/// - The text processing pipeline can make more informed choices.
/// - Hyphenation will use the correct patterns for the language.
/// - [Smart quotes]($func/smartquote) turns into the correct quotes for the
/// language.
/// - And all other things which are language-aware.
///
/// ```example
/// #set text(lang: "de")
/// #outline()
///
/// = Einleitung
/// In diesem Dokument, ...
/// ```
#[default(Lang::ENGLISH)]
pub lang: Lang,
/// An [ISO 3166-1 alpha-2 region code.](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
///
/// This lets the text processing pipeline make more informed choices.
pub region: Option<Region>,
/// The OpenType writing script.
///
/// The combination of `{lang}` and `{script}` determine how font features,
/// such as glyph substitution, are implemented. Frequently the value is a
/// modified (all-lowercase) ISO 15924 script identifier, and the `math`
/// writing script is used for features appropriate for mathematical
/// symbols.
///
/// When set to `{auto}`, the default and recommended setting, an
/// appropriate script is chosen for each block of characters sharing a
/// common Unicode script property.
///
/// ```example
/// #set text(
/// font: "Linux Libertine",
/// size: 20pt,
/// )
///
/// #let scedilla = [Ş]
/// #scedilla // S with a cedilla
///
/// #set text(lang: "ro", script: "latn")
/// #scedilla // S with a subscript comma
///
/// #set text(lang: "ro", script: "grek")
/// #scedilla // S with a cedilla
/// ```
pub script: Smart<WritingScript>,
/// The dominant direction for text and inline objects. Possible values are:
///
/// - `{auto}`: Automatically infer the direction from the `lang` property.
/// - `{ltr}`: Layout text from left to right.
/// - `{rtl}`: Layout text from right to left.
///
/// When writing in right-to-left scripts like Arabic or Hebrew, you should
/// set the [text language]($func/text.lang) or direction. While individual
/// runs of text are automatically layouted in the correct direction,
/// setting the dominant direction gives the bidirectional reordering
/// algorithm the necessary information to correctly place punctuation and
/// inline objects. Furthermore, setting the direction affects the alignment
/// values `start` and `end`, which are equivalent to `left` and `right` in
/// `ltr` text and the other way around in `rtl` text.
///
/// If you set this to `rtl` and experience bugs or in some way bad looking
/// output, please do get in touch with us through the
/// [contact form](https://typst.app/contact) or our
/// [Discord server]($community/#discord)!
///
/// ```example
/// #set text(dir: rtl)
/// هذا عربي.
/// ```
#[resolve]
pub dir: TextDir,
/// Whether to hyphenate text to improve line breaking. When `{auto}`, text
/// will be hyphenated if and only if justification is enabled.
///
/// Setting the [text language]($func/text.lang) ensures that the correct
/// hyphenation patterns are used.
///
/// ```example
/// #set page(width: 200pt)
///
/// #set par(justify: true)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
///
/// #set text(hyphenate: false)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
/// ```
#[resolve]
pub hyphenate: Hyphenate,
/// Whether to apply kerning.
///
/// When enabled, specific letter pairings move closer together or further
/// apart for a more visually pleasing result. The example below
/// demonstrates how decreasing the gap between the "T" and "o" results in a
/// more natural look. Setting this to `{false}` disables kerning by turning
/// off the OpenType `kern` font feature.
///
/// ```example
/// #set text(size: 25pt)
/// Totally
///
/// #set text(kerning: false)
/// Totally
/// ```
#[default(true)]
pub kerning: bool,
/// Whether to apply stylistic alternates.
///
/// Sometimes fonts contain alternative glyphs for the same codepoint.
/// Setting this to `{true}` switches to these by enabling the OpenType
/// `salt` font feature.
///
/// ```example
/// #set text(
/// font: "IBM Plex Sans",
/// size: 20pt,
/// )
///
/// 0, a, g, ß
///
/// #set text(alternates: true)
/// 0, a, g, ß
/// ```
#[default(false)]
pub alternates: bool,
/// Which stylistic set to apply. Font designers can categorize alternative
/// glyphs forms into stylistic sets. As this value is highly font-specific,
/// you need to consult your font to know which sets are available. When set
/// to an integer between `{1}` and `{20}`, enables the corresponding
/// OpenType font feature from `ss01`, ..., `ss20`.
pub stylistic_set: Option<StylisticSet>,
/// Whether standard ligatures are active.
///
/// Certain letter combinations like "fi" are often displayed as a single
/// merged glyph called a _ligature._ Setting this to `{false}` disables
/// these ligatures by turning off the OpenType `liga` and `clig` font
/// features.
///
/// ```example
/// #set text(size: 20pt)
/// A fine ligature.
///
/// #set text(ligatures: false)
/// A fine ligature.
/// ```
#[default(true)]
pub ligatures: bool,
/// Whether ligatures that should be used sparingly are active. Setting this
/// to `{true}` enables the OpenType `dlig` font feature.
#[default(false)]
pub discretionary_ligatures: bool,
/// Whether historical ligatures are active. Setting this to `{true}`
/// enables the OpenType `hlig` font feature.
#[default(false)]
pub historical_ligatures: bool,
/// Which kind of numbers / figures to select. When set to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
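// Hedged illustration (not in the original source): because of this custom
// constructor, a call like `text(blue)[body]` does not wrap `body` in a new
// element; it behaves like applying `set text(fill: blue)` to just that body,
// so the body's structure is preserved and only its style chain changes.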
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
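// Worked example (sketch, not in the original source): with an outer size of
// 11pt, a nested `set text(size: 1.5em + 2pt)` folds to
// 1.5em.at(11pt) + 2pt = 16.5pt + 2pt = 18.5pt, and that result becomes the
// base for any further `em` sizes deeper in the style chain.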
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(met | ngth) => length.resolve(styles),
}
}
}
cast! {
TopEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: TopEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum TopEdgeMetric {
/// The font's ascender, which typically exceeds the height of all glyphs.
Ascender,
/// The approximate height of uppercase letters.
CapHeight,
/// The approximate height of non-ascending lowercase letters.
XHeight,
/// The baseline on which the letters rest.
Baseline,
/// The top edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for TopEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Ascender => Ok(VerticalFontMetric::Ascender),
Self::CapHeight => Ok(VerticalFontMetric::CapHeight),
Self::XHeight => Ok(VerticalFontMetric::XHeight),
Self::Baseline => Ok(VerticalFontMetric::Baseline),
_ => Err(()),
}
}
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum BottomEdge {
/// An edge specified via font metrics or bounding box.
Metric(BottomEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl BottomEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(BottomEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
BottomEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_min)).resolve(styles))
.unwrap_or_default()
}
}
BottomEdge::Length(length) => length.resolve(styles),
}
}
}
cast! {
BottomEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: BottomEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum BottomEdgeMetric {
/// The baseline on which the letters rest.
Baseline,
/// The font's descender, which typically exceeds the depth of all glyphs.
Descender,
/// The bottom edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for BottomEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Baseline => Ok(VerticalFontMetric::Baseline),
Self::Descender => Ok(VerticalFontMetric::Descender),
_ => Err(()),
}
}
}
/// The direction of text and inline objects in their line.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextDir(pub Smart<Dir>);
cast! {
TextDir,
self => self.0.into_value(),
v: Smart<Dir> => {
if v.map_or(false, |dir| dir.axis() == Axis::Y) {
bail!("text direction must be horizontal");
}
Self(v)
},
}
impl Resolve for TextDir {
type Output = Dir;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => TextElem::lang_in(styles).dir(),
Smart::Custom(dir) => dir,
}
}
}
/// Whether to hyphenate text.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct Hyphenate(pub Smart<bool>);
cast! {
Hyphenate,
self => self.0.into_value(),
v: Smart<bool> => Self(v),
}
impl Resolve for Hyphenate {
type Output = bool;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => ParElem::justify_in(styles),
Smart::Custom(v) => v,
}
}
}
/// A stylistic set in a font.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct StylisticSet(u8);
impl StylisticSet {
/// Create a new set, clamping to 1-20.
pub fn new(index: u8) -> Self {
Self(index.clamp(1, 20))
}
/// Get the value, guaranteed to be 1-20.
pub fn get(self) -> u8 {
self.0
}
}
cast! {
StylisticSet,
self => self.0.into_value(),
v: i64 => match v {
1 ..= 20 => Self::new(v as u8),
_ => bail!("stylistic set must be between 1 and 20"),
},
}
/// Which kind of numbers / figures to select.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum NumberType {
/// Numbers that fit well with capital text (the OpenType `lnum`
/// font feature).
Lining,
/// Numbers that fit well into a flow of upper- and lowercase text (the
/// OpenType `onum` font feature).
OldStyle,
}
/// The width of numbers / figures.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum NumberWidth {
/// Numbers with glyph-specific widths (the OpenType `pnum` font feature).
Proportional,
/// Numbers of equal width (the OpenType `tnum` font feature).
Tabular,
}
/// OpenType font features settings.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontFeatures(pub Vec<(Tag, u32)>);
cast! {
FontFeatures,
self => self.0
.into_iter()
.map(|(tag, num)| {
let bytes = tag.to_bytes();
let key = std::str::from_utf8(&bytes).unwrap_or_default();
(key.into(), num.into_value())
})
.collect::<Dict>()
.into_value(),
values: Array => Self(values
.into_iter()
.map(|v| {
let tag = v.cast::<EcoString>()?;
Ok((Tag::from_bytes_lossy(tag.as_bytes()), 1))
})
.collect::<StrResult<_>>()?),
values: Dict => Self(values
.into_iter()
.map(|(k, v)| {
let num = v.cast::<u32>()?;
let tag = Tag::from_bytes_lossy(k.as_bytes());
Ok((tag, num))
})
.collect::<StrResult<_>>()?),
}
impl Fold for FontFeatures {
type Output = Self;
fn fold(mut self, outer: Self::Output) -> Self::Output {
self.0.extend(outer.0);
self
}
}
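// --- Illustrative sketch (added for exposition; not part of the original file). ---
// It assumes `FontFeatures`, `Fold`, and `Tag` are in scope as defined above and
// only demonstrates that folding keeps the inner (more specific) entries first
// and appends the inherited outer entries after them.
#[cfg(test)]
mod font_features_fold_sketch {
    use super::*;

    #[test]
    fn fold_appends_outer_after_inner() {
        let inner = FontFeatures(vec![(Tag::from_bytes_lossy(b"frac"), 1)]);
        let outer = FontFeatures(vec![(Tag::from_bytes_lossy(b"liga"), 0)]);
        let folded = inner.fold(outer);
        assert_eq!(folded.0, vec![
            (Tag::from_bytes_lossy(b"frac"), 1),
            (Tag::from_bytes_lossy(b"liga"), 0),
        ]);
    }
}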
| ric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(le | conditional_block |
mod.rs | //! Text handling.
mod deco;
mod misc;
mod quotes;
mod raw;
mod shaping;
mod shift;
pub use self::deco::*;
pub use self::misc::*;
pub use self::quotes::*;
pub use self::raw::*;
pub use self::shaping::*;
pub use self::shift::*;
use rustybuzz::Tag;
use ttf_parser::Rect;
use typst::font::{Font, FontStretch, FontStyle, FontWeight, VerticalFontMetric};
use crate::layout::ParElem;
use crate::prelude::*;
/// Hook up all text definitions.
pub(super) fn define(global: &mut Scope) {
global.define("text", TextElem::func());
global.define("linebreak", LinebreakElem::func());
global.define("smartquote", SmartQuoteElem::func());
global.define("strong", StrongElem::func());
global.define("emph", EmphElem::func());
global.define("lower", lower_func());
global.define("upper", upper_func());
global.define("smallcaps", smallcaps_func());
global.define("sub", SubElem::func());
global.define("super", SuperElem::func());
global.define("underline", UnderlineElem::func());
global.define("strike", StrikeElem::func());
global.define("overline", OverlineElem::func());
global.define("raw", RawElem::func());
global.define("lorem", lorem_func());
}
/// Customizes the look and layout of text in a variety of ways.
///
/// This function is used frequently, both with set rules and directly. While
/// the set rule is often the simpler choice, calling the `text` function
/// directly can be useful when passing text as an argument to another function.
///
/// ## Example { #example }
/// ```example
/// #set text(18pt)
/// With a set rule.
///
/// #emph(text(blue)[
/// With a function call.
/// ])
/// ```
///
/// Display: Text
/// Category: text
#[element(Construct, PlainText)]
pub struct TextElem {
/// A prioritized sequence of font families.
///
/// When processing text, Typst tries all specified font families in order
/// until it finds a font that has the necessary glyphs. In the example
/// below, the font `Inria Serif` is preferred, but since it does not
/// contain Arabic glyphs, the Arabic text uses `Noto Sans Arabic` instead.
///
/// ```example
/// #set text(font: (
/// "Inria Serif",
/// "Noto Sans Arabic",
/// ))
///
/// This is Latin. \
/// هذا عربي.
///
/// ```
#[default(FontList(vec![FontFamily::new("Linux Libertine")]))]
pub font: FontList,
/// Whether to allow last resort font fallback when the primary font list
/// contains no match. This lets Typst search through all available fonts
/// for the most similar one that has the necessary glyphs.
///
/// _Note:_ Currently, there are no warnings when fallback is disabled and
/// no glyphs are found. Instead, your text shows up in the form of "tofus":
/// Small boxes that indicate the lack of an appropriate glyph. In the
/// future, you will be able to instruct Typst to issue warnings so you know
/// something is up.
///
/// ```example
/// #set text(font: "Inria Serif")
/// هذا عربي
///
/// #set text(fallback: false)
/// هذا عربي
/// ```
#[default(true)]
pub fallback: bool,
/// The desired font style.
///
/// When an italic style is requested and only an oblique one is available,
/// it is used. Similarly, the other way around, an italic style can stand
/// in for an oblique one. When neither an italic nor an oblique style is
/// available, Typst selects the normal style. Since most fonts are only
/// available either in an italic or oblique style, the difference between
/// italic and oblique style is rarely observable.
///
/// If you want to emphasize your text, you should do so using the
/// [emph]($func/emph) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the emphasis.
///
/// ```example
/// #text(font: "Linux Libertine", style: "italic")[Italic]
/// #text(font: "DejaVu Sans", style: "oblique")[Oblique]
/// ```
pub style: FontStyle,
/// The desired thickness of the font's glyphs. Accepts an integer between
/// `{100}` and `{900}` or one of the predefined weight names. When the
/// desired weight is not available, Typst selects the font from the family
/// that is closest in weight.
///
/// If you want to strongly emphasize your text, you should do so using the
/// [strong]($func/strong) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the strong
/// emphasis.
///
/// ```example
/// #set text(font: "IBM Plex Sans")
///
/// #text(weight: "light")[Light] \
/// #text(weight: "regular")[Regular] \
/// #text(weight: "medium")[Medium] \
/// #text(weight: 500)[Medium] \
/// #text(weight: "bold")[Bold]
/// ```
pub weight: FontWeight,
/// The desired width of the glyphs. Accepts a ratio between `{50%}` and
/// `{200%}`. When the desired width is not available, Typst selects the
/// font from the family that is closest in stretch. This will only stretch
/// the text if a condensed or expanded version of the font is available.
///
/// If you want to adjust the amount of space between characters instead of
/// stretching the glyphs themselves, use the [`tracking`]($func/text.tracking)
/// property instead.
///
/// ```example
/// #text(stretch: 75%)[Condensed] \
/// #text(stretch: 100%)[Normal]
/// ```
pub stretch: FontStretch,
/// The size of the glyphs. This value forms the basis of the `em` unit:
/// `{1em}` is equivalent to the font size.
///
/// You can also give the font size itself in `em` units. Then, it is
/// relative to the previous font size.
///
/// ```example
/// #set text(size: 20pt)
/// very #text(1.5em)[big] text
/// ```
#[parse(args.named_or_find("size")?)]
#[fold]
#[default(Abs::pt(11.0))]
pub size: TextSize,
/// The glyph fill color.
///
/// ```example
/// #set text(fill: red)
/// This text is red.
/// ```
#[parse(args.named_or_find("fill")?)]
#[default(Color::BLACK.into())]
pub fill: Paint,
/// The amount of space that should be added between characters.
///
/// ```example
/// #set text(tracking: 1.5pt)
/// Distant text.
/// ```
#[resolve]
pub tracking: Length,
/// The amount of space between words.
///
/// Can be given as an absolute length, but also relative to the width of
/// the space character in the font.
///
/// If you want to adjust the amount of space between characters rather than
/// words, use the [`tracking`]($func/text.tracking) property instead.
///
/// ```example
/// #set text(spacing: 200%)
/// Text with distant words.
/// ```
#[resolve]
#[default(Rel::one())]
pub spacing: Rel<Length>,
/// An amount to shift the text baseline by.
///
/// ```example
/// A #text(baseline: 3pt)[lowered]
/// word.
/// ```
#[resolve]
pub baseline: Length,
/// Whether certain glyphs can hang over into the margin in justified text.
/// This can make justification visually more pleasing.
///
/// ```example
/// #set par(justify: true)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
///
/// #set text(overhang: false)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
/// ```
#[default(true)]
pub overhang: bool,
/// The top end of the conceptual frame around the text used for layout and
/// positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(top-edge: "ascender")
/// #rect(fill: aqua)[Typst]
///
/// #set text(top-edge: "cap-height")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(TopEdge::Metric(TopEdgeMetric::CapHeight))]
pub top_edge: TopEdge,
/// The bottom end of the conceptual frame around the text used for layout
/// and positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(bottom-edge: "baseline")
/// #rect(fill: aqua)[Typst]
///
/// #set text(bottom-edge: "descender")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(BottomEdge::Metric(BottomEdgeMetric::Baseline))]
pub bottom_edge: BottomEdge,
/// An [ISO 639-1/2/3 language code.](https://en.wikipedia.org/wiki/ISO_639)
///
/// Setting the correct language affects various parts of Typst:
///
/// - The text processing pipeline can make more informed choices.
/// - Hyphenation will use the correct patterns for the language.
/// - [Smart quotes]($func/smartquote) turns into the correct quotes for the
/// language.
/// - And all other things which are language-aware.
///
/// ```example
/// #set text(lang: "de")
/// #outline()
///
/// = Einleitung
/// In diesem Dokument, ...
/// ```
#[default(Lang::ENGLISH)]
pub lang: Lang,
/// An [ISO 3166-1 alpha-2 region code.](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
///
/// This lets the text processing pipeline make more informed choices.
pub region: Option<Region>,
/// The OpenType writing script.
///
/// The combination of `{lang}` and `{script}` determine how font features,
/// such as glyph substitution, are implemented. Frequently the value is a
/// modified (all-lowercase) ISO 15924 script identifier, and the `math`
/// writing script is used for features appropriate for mathematical
/// symbols.
///
/// When set to `{auto}`, the default and recommended setting, an
/// appropriate script is chosen for each block of characters sharing a
/// common Unicode script property.
///
/// ```example
/// #set text(
/// font: "Linux Libertine",
/// size: 20pt,
/// )
///
/// #let scedilla = [Ş]
/// #scedilla // S with a cedilla
///
/// #set text(lang: "ro", script: "latn")
/// #scedilla // S with a subscript comma
///
/// #set text(lang: "ro", script: "grek")
/// #scedilla // S with a cedilla
/// ```
pub script: Smart<WritingScript>,
/// The dominant direction for text and inline objects. Possible values are:
///
/// - `{auto}`: Automatically infer the direction from the `lang` property.
/// - `{ltr}`: Layout text from left to right.
/// - `{rtl}`: Layout text from right to left.
///
/// When writing in right-to-left scripts like Arabic or Hebrew, you should
/// set the [text language]($func/text.lang) or direction. While individual
/// runs of text are automatically laid out in the correct direction,
/// setting the dominant direction gives the bidirectional reordering
/// algorithm the necessary information to correctly place punctuation and
/// inline objects. Furthermore, setting the direction affects the alignment
/// values `start` and `end`, which are equivalent to `left` and `right` in
/// `ltr` text and the other way around in `rtl` text.
///
/// If you set this to `rtl` and experience bugs or in some way bad looking
/// output, please do get in touch with us through the
/// [contact form](https://typst.app/contact) or our
/// [Discord server]($community/#discord)!
///
/// ```example
/// #set text(dir: rtl)
/// هذا عربي.
/// ```
#[resolve]
pub dir: TextDir,
/// Whether to hyphenate text to improve line breaking. When `{auto}`, text
/// will be hyphenated if and only if justification is enabled.
///
/// Setting the [text language]($func/text.lang) ensures that the correct
/// hyphenation patterns are used.
///
/// ```example
/// #set page(width: 200pt)
///
/// #set par(justify: true)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
///
/// #set text(hyphenate: false)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
/// ```
#[resolve]
pub hyphenate: Hyphenate,
/// Whether to apply kerning.
///
/// When enabled, specific letter pairings move closer together or further
/// apart for a more visually pleasing result. The example below
/// demonstrates how decreasing the gap between the "T" and "o" results in a
/// more natural look. Setting this to `{false}` disables kerning by turning
/// off the OpenType `kern` font feature.
///
/// ```example
/// #set text(size: 25pt)
/// Totally
///
/// #set text(kerning: false)
/// Totally
/// ```
#[default(true)]
pub kerning: bool,
/// Whether to apply stylistic alternates.
///
/// Sometimes fonts contain alternative glyphs for the same codepoint.
/// Setting this to `{true}` switches to these by enabling the OpenType
/// `salt` font feature.
///
/// ```example
/// #set text(
/// font: "IBM Plex Sans",
/// size: 20pt,
/// )
///
/// 0, a, g, ß
///
/// #set text(alternates: true)
/// 0, a, g, ß
/// ```
#[default(false)]
pub alternates: bool,
/// Which stylistic set to apply. Font designers can categorize alternative
/// glyph forms into stylistic sets. As this value is highly font-specific,
/// you need to consult your font to know which sets are available. When set
/// to an integer between `{1}` and `{20}`, enables the corresponding
/// OpenType font feature from `ss01`, ..., `ss20`.
pub stylistic_set: Option<StylisticSet>,
/// Whether standard ligatures are active.
///
/// Certain letter combinations like "fi" are often displayed as a single
/// merged glyph called a _ligature._ Setting this to `{false}` disables
/// these ligatures by turning off the OpenType `liga` and `clig` font
/// features.
///
/// ```example
/// #set text(size: 20pt)
/// A fine ligature.
///
/// #set text(ligatures: false)
/// A fine ligature.
/// ```
#[default(true)]
pub ligatures: bool,
/// Whether ligatures that should be used sparingly are active. Setting this
/// to `{true}` enables the OpenType `dlig` font feature.
#[default(false)]
pub discretionary_ligatures: bool,
/// Whether historical ligatures are active. Setting this to `{true}`
/// enables the OpenType `hlig` font feature.
#[default(false)]
pub historical_ligatures: bool,
/// Which kind of numbers / figures to select. When set to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
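// --- Illustrative note (added for exposition; not part of the original file). ---
// `TextSize` folds against the outer font size: the `em` component is resolved
// at the outer size and the absolute component is added on top. For example,
// with an outer size of 11pt:
//   1.5em       -> 1.5 * 11pt        = 16.5pt
//   20pt        -> 0em + 20pt        = 20pt
//   0.5em + 2pt -> 0.5 * 11pt + 2pt  = 7.5pt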
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(length) => length.resolve(styles),
}
}
}
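// --- Illustrative note (added for exposition; not part of the original file). ---
// Only the `bounds` metric fails the `try_into` above, so only it takes the
// `else` branch: the edge is then taken from the glyph bounding box (`y_max`
// converted to em units and resolved at the current font size). When no
// bounding box is available, `unwrap_or_default` resolves the edge to zero.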
cast! {
TopEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: TopEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum TopEdgeMetric {
/// The font's ascender, which typically exceeds the height of all glyphs.
Ascender,
/// The approximate height of uppercase letters.
CapHeight,
/// The approximate height of non-ascending lowercase letters.
XHeight,
/// The baseline on which the letters rest.
Baseline,
/// The top edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for TopEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Ascender => Ok(VerticalFontMetric::Ascender),
Self::CapHeight => Ok(VerticalFontMetric::CapHeight),
Self::XHeight => Ok(VerticalFontMetric::XHeight),
Self::Baseline => Ok(VerticalFontMetric::Baseline),
_ => Err(()),
}
}
}
/// Specifies the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum BottomEdge {
/// An edge specified via font metrics or bounding box.
Metric(BottomEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl BottomEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(BottomEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
BottomEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_min)).resolve(styles))
.unwrap_or_default()
}
}
BottomEdge::Length(length) => length.resolve(styles),
} | self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: BottomEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum BottomEdgeMetric {
/// The baseline on which the letters rest.
Baseline,
/// The font's descender, which typically exceeds the depth of all glyphs.
Descender,
/// The bottom edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for BottomEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Baseline => Ok(VerticalFontMetric::Baseline),
Self::Descender => Ok(VerticalFontMetric::Descender),
_ => Err(()),
}
}
}
/// The direction of text and inline objects in their line.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextDir(pub Smart<Dir>);
cast! {
TextDir,
self => self.0.into_value(),
v: Smart<Dir> => {
if v.map_or(false, |dir| dir.axis() == Axis::Y) {
bail!("text direction must be horizontal");
}
Self(v)
},
}
impl Resolve for TextDir {
type Output = Dir;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => TextElem::lang_in(styles).dir(),
Smart::Custom(dir) => dir,
}
}
}
/// Whether to hyphenate text.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct Hyphenate(pub Smart<bool>);
cast! {
Hyphenate,
self => self.0.into_value(),
v: Smart<bool> => Self(v),
}
impl Resolve for Hyphenate {
type Output = bool;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => ParElem::justify_in(styles),
Smart::Custom(v) => v,
}
}
}
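// --- Illustrative note (added for exposition; not part of the original file). ---
// With `hyphenate: auto`, the resolved value simply follows paragraph
// justification: `#set par(justify: true)` turns hyphenation on and
// `#set par(justify: false)` turns it off again, unless an explicit
// `#set text(hyphenate: ...)` overrides the automatic behaviour.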
/// A stylistic set in a font.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct StylisticSet(u8);
impl StylisticSet {
/// Create a new set, clamping to 1-20.
pub fn new(index: u8) -> Self {
Self(index.clamp(1, 20))
}
/// Get the value, guaranteed to be 1-20.
pub fn get(self) -> u8 {
self.0
}
}
cast! {
StylisticSet,
self => self.0.into_value(),
v: i64 => match v {
1 ..= 20 => Self::new(v as u8),
_ => bail!("stylistic set must be between 1 and 20"),
},
}
/// Which kind of numbers / figures to select.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum NumberType {
/// Numbers that fit well with capital text (the OpenType `lnum`
/// font feature).
Lining,
/// Numbers that fit well into a flow of upper- and lowercase text (the
/// OpenType `onum` font feature).
OldStyle,
}
/// The width of numbers / figures.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum NumberWidth {
/// Numbers with glyph-specific widths (the OpenType `pnum` font feature).
Proportional,
/// Numbers of equal width (the OpenType `tnum` font feature).
Tabular,
}
/// OpenType font features settings.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontFeatures(pub Vec<(Tag, u32)>);
cast! {
FontFeatures,
self => self.0
.into_iter()
.map(|(tag, num)| {
let bytes = tag.to_bytes();
let key = std::str::from_utf8(&bytes).unwrap_or_default();
(key.into(), num.into_value())
})
.collect::<Dict>()
.into_value(),
values: Array => Self(values
.into_iter()
.map(|v| {
let tag = v.cast::<EcoString>()?;
Ok((Tag::from_bytes_lossy(tag.as_bytes()), 1))
})
.collect::<StrResult<_>>()?),
values: Dict => Self(values
.into_iter()
.map(|(k, v)| {
let num = v.cast::<u32>()?;
let tag = Tag::from_bytes_lossy(k.as_bytes());
Ok((tag, num))
})
.collect::<StrResult<_>>()?),
}
impl Fold for FontFeatures {
type Output = Self;
fn fold(mut self, outer: Self::Output) -> Self::Output {
self.0.extend(outer.0);
self
}
} | }
}
cast! {
BottomEdge, | random_line_split |
mod.rs | //! Text handling.
mod deco;
mod misc;
mod quotes;
mod raw;
mod shaping;
mod shift;
pub use self::deco::*;
pub use self::misc::*;
pub use self::quotes::*;
pub use self::raw::*;
pub use self::shaping::*;
pub use self::shift::*;
use rustybuzz::Tag;
use ttf_parser::Rect;
use typst::font::{Font, FontStretch, FontStyle, FontWeight, VerticalFontMetric};
use crate::layout::ParElem;
use crate::prelude::*;
/// Hook up all text definitions.
pub(super) fn | (global: &mut Scope) {
global.define("text", TextElem::func());
global.define("linebreak", LinebreakElem::func());
global.define("smartquote", SmartQuoteElem::func());
global.define("strong", StrongElem::func());
global.define("emph", EmphElem::func());
global.define("lower", lower_func());
global.define("upper", upper_func());
global.define("smallcaps", smallcaps_func());
global.define("sub", SubElem::func());
global.define("super", SuperElem::func());
global.define("underline", UnderlineElem::func());
global.define("strike", StrikeElem::func());
global.define("overline", OverlineElem::func());
global.define("raw", RawElem::func());
global.define("lorem", lorem_func());
}
/// Customizes the look and layout of text in a variety of ways.
///
/// This function is used frequently, both with set rules and directly. While
/// the set rule is often the simpler choice, calling the `text` function
/// directly can be useful when passing text as an argument to another function.
///
/// ## Example { #example }
/// ```example
/// #set text(18pt)
/// With a set rule.
///
/// #emph(text(blue)[
/// With a function call.
/// ])
/// ```
///
/// Display: Text
/// Category: text
#[element(Construct, PlainText)]
pub struct TextElem {
/// A prioritized sequence of font families.
///
/// When processing text, Typst tries all specified font families in order
/// until it finds a font that has the necessary glyphs. In the example
/// below, the font `Inria Serif` is preferred, but since it does not
/// contain Arabic glyphs, the Arabic text uses `Noto Sans Arabic` instead.
///
/// ```example
/// #set text(font: (
/// "Inria Serif",
/// "Noto Sans Arabic",
/// ))
///
/// This is Latin. \
/// هذا عربي.
///
/// ```
#[default(FontList(vec![FontFamily::new("Linux Libertine")]))]
pub font: FontList,
/// Whether to allow last resort font fallback when the primary font list
/// contains no match. This lets Typst search through all available fonts
/// for the most similar one that has the necessary glyphs.
///
/// _Note:_ Currently, there are no warnings when fallback is disabled and
/// no glyphs are found. Instead, your text shows up in the form of "tofus":
/// Small boxes that indicate the lack of an appropriate glyph. In the
/// future, you will be able to instruct Typst to issue warnings so you know
/// something is up.
///
/// ```example
/// #set text(font: "Inria Serif")
/// هذا عربي
///
/// #set text(fallback: false)
/// هذا عربي
/// ```
#[default(true)]
pub fallback: bool,
/// The desired font style.
///
/// When an italic style is requested and only an oblique one is available,
/// it is used. Similarly, the other way around, an italic style can stand
/// in for an oblique one. When neither an italic nor an oblique style is
/// available, Typst selects the normal style. Since most fonts are only
/// available either in an italic or oblique style, the difference between
/// italic and oblique style is rarely observable.
///
/// If you want to emphasize your text, you should do so using the
/// [emph]($func/emph) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the emphasis.
///
/// ```example
/// #text(font: "Linux Libertine", style: "italic")[Italic]
/// #text(font: "DejaVu Sans", style: "oblique")[Oblique]
/// ```
pub style: FontStyle,
/// The desired thickness of the font's glyphs. Accepts an integer between
/// `{100}` and `{900}` or one of the predefined weight names. When the
/// desired weight is not available, Typst selects the font from the family
/// that is closest in weight.
///
/// If you want to strongly emphasize your text, you should do so using the
/// [strong]($func/strong) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the strong
/// emphasis.
///
/// ```example
/// #set text(font: "IBM Plex Sans")
///
/// #text(weight: "light")[Light] \
/// #text(weight: "regular")[Regular] \
/// #text(weight: "medium")[Medium] \
/// #text(weight: 500)[Medium] \
/// #text(weight: "bold")[Bold]
/// ```
pub weight: FontWeight,
/// The desired width of the glyphs. Accepts a ratio between `{50%}` and
/// `{200%}`. When the desired width is not available, Typst selects the
/// font from the family that is closest in stretch. This will only stretch
/// the text if a condensed or expanded version of the font is available.
///
/// If you want to adjust the amount of space between characters instead of
/// stretching the glyphs themselves, use the [`tracking`]($func/text.tracking)
/// property instead.
///
/// ```example
/// #text(stretch: 75%)[Condensed] \
/// #text(stretch: 100%)[Normal]
/// ```
pub stretch: FontStretch,
/// The size of the glyphs. This value forms the basis of the `em` unit:
/// `{1em}` is equivalent to the font size.
///
/// You can also give the font size itself in `em` units. Then, it is
/// relative to the previous font size.
///
/// ```example
/// #set text(size: 20pt)
/// very #text(1.5em)[big] text
/// ```
#[parse(args.named_or_find("size")?)]
#[fold]
#[default(Abs::pt(11.0))]
pub size: TextSize,
/// The glyph fill color.
///
/// ```example
/// #set text(fill: red)
/// This text is red.
/// ```
#[parse(args.named_or_find("fill")?)]
#[default(Color::BLACK.into())]
pub fill: Paint,
/// The amount of space that should be added between characters.
///
/// ```example
/// #set text(tracking: 1.5pt)
/// Distant text.
/// ```
#[resolve]
pub tracking: Length,
/// The amount of space between words.
///
/// Can be given as an absolute length, but also relative to the width of
/// the space character in the font.
///
/// If you want to adjust the amount of space between characters rather than
/// words, use the [`tracking`]($func/text.tracking) property instead.
///
/// ```example
/// #set text(spacing: 200%)
/// Text with distant words.
/// ```
#[resolve]
#[default(Rel::one())]
pub spacing: Rel<Length>,
/// An amount to shift the text baseline by.
///
/// ```example
/// A #text(baseline: 3pt)[lowered]
/// word.
/// ```
#[resolve]
pub baseline: Length,
/// Whether certain glyphs can hang over into the margin in justified text.
/// This can make justification visually more pleasing.
///
/// ```example
/// #set par(justify: true)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
///
/// #set text(overhang: false)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
/// ```
#[default(true)]
pub overhang: bool,
/// The top end of the conceptual frame around the text used for layout and
/// positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(top-edge: "ascender")
/// #rect(fill: aqua)[Typst]
///
/// #set text(top-edge: "cap-height")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(TopEdge::Metric(TopEdgeMetric::CapHeight))]
pub top_edge: TopEdge,
/// The bottom end of the conceptual frame around the text used for layout
/// and positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(bottom-edge: "baseline")
/// #rect(fill: aqua)[Typst]
///
/// #set text(bottom-edge: "descender")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(BottomEdge::Metric(BottomEdgeMetric::Baseline))]
pub bottom_edge: BottomEdge,
/// An [ISO 639-1/2/3 language code.](https://en.wikipedia.org/wiki/ISO_639)
///
/// Setting the correct language affects various parts of Typst:
///
/// - The text processing pipeline can make more informed choices.
/// - Hyphenation will use the correct patterns for the language.
/// - [Smart quotes]($func/smartquote) turns into the correct quotes for the
/// language.
/// - And all other things which are language-aware.
///
/// ```example
/// #set text(lang: "de")
/// #outline()
///
/// = Einleitung
/// In diesem Dokument, ...
/// ```
#[default(Lang::ENGLISH)]
pub lang: Lang,
/// An [ISO 3166-1 alpha-2 region code.](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
///
/// This lets the text processing pipeline make more informed choices.
pub region: Option<Region>,
/// The OpenType writing script.
///
/// The combination of `{lang}` and `{script}` determine how font features,
/// such as glyph substitution, are implemented. Frequently the value is a
/// modified (all-lowercase) ISO 15924 script identifier, and the `math`
/// writing script is used for features appropriate for mathematical
/// symbols.
///
/// When set to `{auto}`, the default and recommended setting, an
/// appropriate script is chosen for each block of characters sharing a
/// common Unicode script property.
///
/// ```example
/// #set text(
/// font: "Linux Libertine",
/// size: 20pt,
/// )
///
/// #let scedilla = [Ş]
/// #scedilla // S with a cedilla
///
/// #set text(lang: "ro", script: "latn")
/// #scedilla // S with a subscript comma
///
/// #set text(lang: "ro", script: "grek")
/// #scedilla // S with a cedilla
/// ```
pub script: Smart<WritingScript>,
/// The dominant direction for text and inline objects. Possible values are:
///
/// - `{auto}`: Automatically infer the direction from the `lang` property.
/// - `{ltr}`: Layout text from left to right.
/// - `{rtl}`: Layout text from right to left.
///
/// When writing in right-to-left scripts like Arabic or Hebrew, you should
/// set the [text language]($func/text.lang) or direction. While individual
/// runs of text are automatically laid out in the correct direction,
/// setting the dominant direction gives the bidirectional reordering
/// algorithm the necessary information to correctly place punctuation and
/// inline objects. Furthermore, setting the direction affects the alignment
/// values `start` and `end`, which are equivalent to `left` and `right` in
/// `ltr` text and the other way around in `rtl` text.
///
/// If you set this to `rtl` and experience bugs or in some way bad looking
/// output, please do get in touch with us through the
/// [contact form](https://typst.app/contact) or our
/// [Discord server]($community/#discord)!
///
/// ```example
/// #set text(dir: rtl)
/// هذا عربي.
/// ```
#[resolve]
pub dir: TextDir,
/// Whether to hyphenate text to improve line breaking. When `{auto}`, text
/// will be hyphenated if and only if justification is enabled.
///
/// Setting the [text language]($func/text.lang) ensures that the correct
/// hyphenation patterns are used.
///
/// ```example
/// #set page(width: 200pt)
///
/// #set par(justify: true)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
///
/// #set text(hyphenate: false)
/// This text illustrates how
/// enabling hyphenation can
/// improve justification.
/// ```
#[resolve]
pub hyphenate: Hyphenate,
/// Whether to apply kerning.
///
/// When enabled, specific letter pairings move closer together or further
/// apart for a more visually pleasing result. The example below
/// demonstrates how decreasing the gap between the "T" and "o" results in a
/// more natural look. Setting this to `{false}` disables kerning by turning
/// off the OpenType `kern` font feature.
///
/// ```example
/// #set text(size: 25pt)
/// Totally
///
/// #set text(kerning: false)
/// Totally
/// ```
#[default(true)]
pub kerning: bool,
/// Whether to apply stylistic alternates.
///
/// Sometimes fonts contain alternative glyphs for the same codepoint.
/// Setting this to `{true}` switches to these by enabling the OpenType
/// `salt` font feature.
///
/// ```example
/// #set text(
/// font: "IBM Plex Sans",
/// size: 20pt,
/// )
///
/// 0, a, g, ß
///
/// #set text(alternates: true)
/// 0, a, g, ß
/// ```
#[default(false)]
pub alternates: bool,
/// Which stylistic set to apply. Font designers can categorize alternative
/// glyph forms into stylistic sets. As this value is highly font-specific,
/// you need to consult your font to know which sets are available. When set
/// to an integer between `{1}` and `{20}`, enables the corresponding
/// OpenType font feature from `ss01`, ..., `ss20`.
pub stylistic_set: Option<StylisticSet>,
/// Whether standard ligatures are active.
///
/// Certain letter combinations like "fi" are often displayed as a single
/// merged glyph called a _ligature._ Setting this to `{false}` disables
/// these ligatures by turning off the OpenType `liga` and `clig` font
/// features.
///
/// ```example
/// #set text(size: 20pt)
/// A fine ligature.
///
/// #set text(ligatures: false)
/// A fine ligature.
/// ```
#[default(true)]
pub ligatures: bool,
/// Whether ligatures that should be used sparingly are active. Setting this
/// to `{true}` enables the OpenType `dlig` font feature.
#[default(false)]
pub discretionary_ligatures: bool,
/// Whether historical ligatures are active. Setting this to `{true}`
/// enables the OpenType `hlig` font feature.
#[default(false)]
pub historical_ligatures: bool,
/// Which kind of numbers / figures to select. When set to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(length) => length.resolve(styles),
}
}
}
cast! {
TopEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: TopEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum TopEdgeMetric {
/// The font's ascender, which typically exceeds the height of all glyphs.
Ascender,
/// The approximate height of uppercase letters.
CapHeight,
/// The approximate height of non-ascending lowercase letters.
XHeight,
/// The baseline on which the letters rest.
Baseline,
/// The top edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for TopEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Ascender => Ok(VerticalFontMetric::Ascender),
Self::CapHeight => Ok(VerticalFontMetric::CapHeight),
Self::XHeight => Ok(VerticalFontMetric::XHeight),
Self::Baseline => Ok(VerticalFontMetric::Baseline),
_ => Err(()),
}
}
}
/// Specifies the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum BottomEdge {
/// An edge specified via font metrics or bounding box.
Metric(BottomEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl BottomEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(BottomEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
BottomEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_min)).resolve(styles))
.unwrap_or_default()
}
}
BottomEdge::Length(length) => length.resolve(styles),
}
}
}
cast! {
BottomEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: BottomEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum BottomEdgeMetric {
/// The baseline on which the letters rest.
Baseline,
/// The font's descender, which typically exceeds the depth of all glyphs.
Descender,
/// The bottom edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for BottomEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Baseline => Ok(VerticalFontMetric::Baseline),
Self::Descender => Ok(VerticalFontMetric::Descender),
_ => Err(()),
}
}
}
/// The direction of text and inline objects in their line.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextDir(pub Smart<Dir>);
cast! {
TextDir,
self => self.0.into_value(),
v: Smart<Dir> => {
if v.map_or(false, |dir| dir.axis() == Axis::Y) {
bail!("text direction must be horizontal");
}
Self(v)
},
}
impl Resolve for TextDir {
type Output = Dir;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => TextElem::lang_in(styles).dir(),
Smart::Custom(dir) => dir,
}
}
}
/// Whether to hyphenate text.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
pub struct Hyphenate(pub Smart<bool>);
cast! {
Hyphenate,
self => self.0.into_value(),
v: Smart<bool> => Self(v),
}
impl Resolve for Hyphenate {
type Output = bool;
fn resolve(self, styles: StyleChain) -> Self::Output {
match self.0 {
Smart::Auto => ParElem::justify_in(styles),
Smart::Custom(v) => v,
}
}
}
/// A stylistic set in a font.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct StylisticSet(u8);
impl StylisticSet {
/// Create a new set, clamping to 1-20.
pub fn new(index: u8) -> Self {
Self(index.clamp(1, 20))
}
/// Get the value, guaranteed to be 1-20.
pub fn get(self) -> u8 {
self.0
}
}
cast! {
StylisticSet,
self => self.0.into_value(),
v: i64 => match v {
1 ..= 20 => Self::new(v as u8),
_ => bail!("stylistic set must be between 1 and 20"),
},
}
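// --- Illustrative note (added for exposition; not part of the original file). ---
// Two different guards apply here: `StylisticSet::new` clamps out-of-range
// values (`new(25)` yields 20), while the `cast!` rule above rejects them, so
// `#set text(stylistic-set: 25)` fails with "stylistic set must be between
// 1 and 20" rather than being silently clamped.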
/// Which kind of numbers / figures to select.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum NumberType {
/// Numbers that fit well with capital text (the OpenType `lnum`
/// font feature).
Lining,
/// Numbers that fit well into a flow of upper- and lowercase text (the
/// OpenType `onum` font feature).
OldStyle,
}
/// The width of numbers / figures.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum NumberWidth {
/// Numbers with glyph-specific widths (the OpenType `pnum` font feature).
Proportional,
/// Numbers of equal width (the OpenType `tnum` font feature).
Tabular,
}
/// OpenType font features settings.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontFeatures(pub Vec<(Tag, u32)>);
cast! {
FontFeatures,
self => self.0
.into_iter()
.map(|(tag, num)| {
let bytes = tag.to_bytes();
let key = std::str::from_utf8(&bytes).unwrap_or_default();
(key.into(), num.into_value())
})
.collect::<Dict>()
.into_value(),
values: Array => Self(values
.into_iter()
.map(|v| {
let tag = v.cast::<EcoString>()?;
Ok((Tag::from_bytes_lossy(tag.as_bytes()), 1))
})
.collect::<StrResult<_>>()?),
values: Dict => Self(values
.into_iter()
.map(|(k, v)| {
let num = v.cast::<u32>()?;
let tag = Tag::from_bytes_lossy(k.as_bytes());
Ok((tag, num))
})
.collect::<StrResult<_>>()?),
}
impl Fold for FontFeatures {
type Output = Self;
fn fold(mut self, outer: Self::Output) -> Self::Output {
self.0.extend(outer.0);
self
}
}
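// --- Illustrative note (added for exposition; not part of the original file). ---
// The `cast!` rules above accept either form from the Typst side:
//   #set text(features: ("frac", "case"))   // array: each tag is set to 1
//   #set text(features: (onum: 1, liga: 0)) // dict: each tag gets its value
// Tags are normalized with `Tag::from_bytes_lossy`, so a misspelled feature
// name simply becomes a tag that the font ignores.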
| define | identifier_name |
tetris.py | """This is the main file for the Pytris project. The three concrete classes
defined herein are
Board: generally controls the flow of the game, e.g. interacting with the
classes defined in tetris_pieces.py to determine whether and how pieces
get moved around the board. Also responsible for displaying the state of
the board.
NextPieceDisplay: is responsible for creating and displaying the next piece.
Main: a window containing a Board, a NextPieceDisplay, and other components
relevant to the game state. The Board actually controls what happens to
these components during game play.
Also defines an abstract class SquarePainter (extended by both Board and
NextPieceDisplay), and a convenience function styled_set_label_text.
@author Quinn Maurmann
"""
import pygtk
pygtk.require("2.0")
import cairo
import glib
import gtk
import random
import tetris_pieces
tuple_add = tetris_pieces.tuple_add # too useful to call by namespace
DOT_SIZE = 30
ROWS = 18
COLS = 10
class SquarePainter(gtk.DrawingArea):
"""Abstract SquarePainter class factors out the ability to paint squares
on a grid. Extended by both the Board and NextPieceDisplay classes."""
def paint_square(self, pos, color, cr):
"""Paints a square on the grid at a particular (int, int) position.
Color is given as an RGB triple (of floats between 0 and 1); cr is the
Cairo context. Used only in the expose methods of Board and
NextPieceDisplay"""
cr.set_source_rgb(*color)
i, j = pos
cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
cr.fill()
class Board(SquarePainter):
"""Board is responsible for handling all game logic and displaying
state."""
def __init__(self, next_piece_display, level_display, lines_display,
score_display):
super(Board, self).__init__()
self.set_size_request(COLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
break
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color
self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()):
self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def | (self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how for to drop each other row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
if self.level < self.lines // 10 + 1:
self.increment_level()
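# --- Illustrative note (added for exposition; not part of the original file). ---
# Example of the drop calculation above: if full_rows == [16, 18], then for a
# locked square in row 17 only row 18 lies below it, so drop[17] == 1; squares
# in rows 0-15 have both full rows below them and drop by 2; nothing in rows 16
# or 18 survives the rebuild. With d == 2 cleared lines, the score increases by
# level * 100.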
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
super(Main, self).__init__()
self.set_title("Tetris")
self.set_resizable(False)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.connect("key-press-event", self.on_key_down)
### Create and reformat labels ###
self.next_piece_words = gtk.Label("Undefined")
self.level_display = gtk.Label("Undefined")
self.lines_display = gtk.Label("Undefined")
self.score_display = gtk.Label("Undefined")
self.next_piece_words.set_alignment(.2, .4)
self.level_display.set_alignment(.2, 0)
self.lines_display.set_alignment(.2, 0)
self.score_display.set_alignment(.2, 0)
styled_set_label_text(self.next_piece_words, "Next Piece:")
### Note: Board automatically fixes other three labels ###
self.next_piece_display = NextPieceDisplay()
self.board = Board(self.next_piece_display, self.level_display,
self.lines_display, self.score_display)
self.hbox = gtk.HBox() # split screen into 2 panels
self.add(self.hbox)
self.hbox.add(self.board) # left panel is Board
self.vbox = gtk.VBox() # right panel has everything else in a VBox
### Have to wrap VBox in EventBox to change BG color ###
self.vbox_wrapper = gtk.EventBox()
self.vbox_wrapper.add(self.vbox)
self.vbox_wrapper.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.Color(0.05, 0.05, 0.05))
self.hbox.add(self.vbox_wrapper)
self.vbox.add(self.next_piece_words)
self.vbox.add(self.next_piece_display)
self.vbox.add(self.level_display)
self.vbox.add(self.lines_display)
self.vbox.add(self.score_display)
self.show_all()
def on_key_down(self, widget, event):
key = event.keyval
if key == gtk.keysyms.Left:
self.board.move_curr_piece((-1, 0))
elif key == gtk.keysyms.Up:
self.board.rotate_curr_piece()
elif key == gtk.keysyms.Right:
self.board.move_curr_piece((1, 0))
elif key == gtk.keysyms.Down:
self.board.move_curr_piece((0, 1), point=True)
elif key == gtk.keysyms.space:
self.board.drop_curr_piece()
def styled_set_label_text(label, text):
"""Set the text of a gtk.Label with the preferred markup scheme. (Simple
enough not to be worth extending gtk.Label just for this method.)"""
front = "<b><span foreground='#AAAAAA' size='large'>"
end = "</span></b>"
label.set_markup(front+text+end)
if __name__ == "__main__":
Main()
gtk.main()
| clear_rows | identifier_name |
tetris.py | """This is the main file for the Pytris project. The three concrete classes
defined herein are
Board: generally controls the flow of the game, e.g. interacting with the
classes defined in tetris_pieces.py to determine whether and how pieces
get moved around the board. Also responsible for displaying the state of
the board.
NextPieceDisplay: is responsible for creating and displaying the next piece.
Main: a window containing a Board, a NextPieceDisplay, and other components
relevant to the game state. The Board actually controls what happens to
these components during game play.
Also defines an abstract class SquarePainter (extended by both Board and
NextPieceDisplay), and a convenience function styled_set_label_text.
@author Quinn Maurmann
"""
import pygtk
pygtk.require("2.0")
import cairo
import glib
import gtk
import random
import tetris_pieces
tuple_add = tetris_pieces.tuple_add # too useful to call by namespace
DOT_SIZE = 30
ROWS = 18
COLS = 10
class SquarePainter(gtk.DrawingArea):
"""Abstract SquarePainter class factors out the ability to paint squares
on a grid. Extended by both the Board and NextPieceDisplay classes."""
def paint_square(self, pos, color, cr):
"""Paints a square on the grid at a particular (int, int) position.
Color is given as an RGB triple (of floats between 0 and 1); cr is the
Cairo context. Used only in the expose methods of Board and
NextPieceDisplay"""
cr.set_source_rgb(*color)
i, j = pos
cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
cr.fill()
class Board(SquarePainter):
"""Board is responsible for handling all game logic and displaying
state."""
def __init__(self, next_piece_display, level_display, lines_display,
score_display):
super(Board, self).__init__()
self.set_size_request(COLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
|
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color
self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()):
self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def clear_rows(self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
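# (Illustrative example, not part of the original code: if full_rows == [16, 18],
#  then drop[15] == 2 and drop[17] == 1, so every surviving square above both full
#  rows falls two places while the row between them falls one place.)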
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
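# (Illustrative arithmetic, not part of the original code: clearing d == 4 lines at
#  level 2 grants 2 * 1200 == 2400 points; the check below then raises the level once
#  the total line count reaches the next multiple of 10, e.g. 1 < 10 // 10 + 1.)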
if self.level < self.lines // 10 + 1:
self.increment_level()
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
super(Main, self).__init__()
self.set_title("Tetris")
self.set_resizable(False)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.connect("key-press-event", self.on_key_down)
### Create and reformat labels ###
self.next_piece_words = gtk.Label("Undefined")
self.level_display = gtk.Label("Undefined")
self.lines_display = gtk.Label("Undefined")
self.score_display = gtk.Label("Undefined")
self.next_piece_words.set_alignment(.2, .4)
self.level_display.set_alignment(.2, 0)
self.lines_display.set_alignment(.2, 0)
self.score_display.set_alignment(.2, 0)
styled_set_label_text(self.next_piece_words, "Next Piece:")
### Note: Board automatically fixes other three labels ###
self.next_piece_display = NextPieceDisplay()
self.board = Board(self.next_piece_display, self.level_display,
self.lines_display, self.score_display)
self.hbox = gtk.HBox() # split screen into 2 panels
self.add(self.hbox)
self.hbox.add(self.board) # left panel is Board
self.vbox = gtk.VBox() # right panel has everything else in a VBox
### Have to wrap VBox in EventBox to change BG color ###
self.vbox_wrapper = gtk.EventBox()
self.vbox_wrapper.add(self.vbox)
self.vbox_wrapper.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.Color(0.05, 0.05, 0.05))
self.hbox.add(self.vbox_wrapper)
self.vbox.add(self.next_piece_words)
self.vbox.add(self.next_piece_display)
self.vbox.add(self.level_display)
self.vbox.add(self.lines_display)
self.vbox.add(self.score_display)
self.show_all()
def on_key_down(self, widget, event):
key = event.keyval
if key == gtk.keysyms.Left:
self.board.move_curr_piece((-1, 0))
elif key == gtk.keysyms.Up:
self.board.rotate_curr_piece()
elif key == gtk.keysyms.Right:
self.board.move_curr_piece((1, 0))
elif key == gtk.keysyms.Down:
self.board.move_curr_piece((0, 1), point=True)
elif key == gtk.keysyms.space:
self.board.drop_curr_piece()
def styled_set_label_text(label, text):
"""Set the text of a gtk.Label with the preferred markup scheme. (Simple
enough not to be worth extending gtk.Label just for this method.)"""
front = "<b><span foreground='#AAAAAA' size='large'>"
end = "</span></b>"
label.set_markup(front+text+end)
if __name__ == "__main__":
Main()
gtk.main()
| break | conditional_block |
tetris.py | """This is the main file for the Pytris project. The three concrete classes
defined herein are
Board: generally controls the flow of the game, e.g. interacting with the
classes defined in tetris_pieces.py to determine whether and how pieces
get moved around the board. Also responsible for displaying the state of
the board.
NextPieceDisplay: is responsible for creating and displaying the next piece.
Main: a window containing a Board, a NextPieceDisplay, and other components
relevant to the game state. The Board actually controls what happens to
these components during game play.
Also defines an abstract class SquarePainter (extended by both Board and
NextPieceDisplay), and a convenience function styled_set_label_text.
@author Quinn Maurmann
"""
import pygtk
pygtk.require("2.0")
import cairo
import glib
import gtk
import random
import tetris_pieces
tuple_add = tetris_pieces.tuple_add # too useful to call by namespace
DOT_SIZE = 30
ROWS = 18
COLS = 10
class SquarePainter(gtk.DrawingArea):
"""Abstract SquarePainter class factors out the ability to paint squares
on a grid. Extended by both the Board and NextPieceDisplay classes."""
def paint_square(self, pos, color, cr):
"""Paints a square on the grid at a particular (int, int) position.
Color is given as an RGB triple (of floats between 0 and 1); cr is the
Cairo context. Used only in the expose methods of Board and
NextPieceDisplay"""
cr.set_source_rgb(*color)
i, j = pos
cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
cr.fill()
class Board(SquarePainter):
"""Board is responsible for handling all game logic and displaying
state."""
def __init__(self, next_piece_display, level_display, lines_display,
score_display):
super(Board, self).__init__()
self.set_size_request(COLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
break
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color | self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def clear_rows(self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
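# (Illustrative example, not part of the original code: if full_rows == [16, 18],
#  then drop[15] == 2 and drop[17] == 1, so every surviving square above both full
#  rows falls two places while the row between them falls one place.)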
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
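# (Illustrative arithmetic, not part of the original code: clearing d == 4 lines at
#  level 2 grants 2 * 1200 == 2400 points; the check below then raises the level once
#  the total line count reaches the next multiple of 10, e.g. 1 < 10 // 10 + 1.)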
if self.level < self.lines // 10 + 1:
self.increment_level()
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
super(Main, self).__init__()
self.set_title("Tetris")
self.set_resizable(False)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.connect("key-press-event", self.on_key_down)
### Create and reformat labels ###
self.next_piece_words = gtk.Label("Undefined")
self.level_display = gtk.Label("Undefined")
self.lines_display = gtk.Label("Undefined")
self.score_display = gtk.Label("Undefined")
self.next_piece_words.set_alignment(.2, .4)
self.level_display.set_alignment(.2, 0)
self.lines_display.set_alignment(.2, 0)
self.score_display.set_alignment(.2, 0)
styled_set_label_text(self.next_piece_words, "Next Piece:")
### Note: Board automatically fixes other three labels ###
self.next_piece_display = NextPieceDisplay()
self.board = Board(self.next_piece_display, self.level_display,
self.lines_display, self.score_display)
self.hbox = gtk.HBox() # split screen into 2 panels
self.add(self.hbox)
self.hbox.add(self.board) # left panel is Board
self.vbox = gtk.VBox() # right panel has everything else in a VBox
### Have to wrap VBox in EventBox to change BG color ###
self.vbox_wrapper = gtk.EventBox()
self.vbox_wrapper.add(self.vbox)
self.vbox_wrapper.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.Color(0.05, 0.05, 0.05))
self.hbox.add(self.vbox_wrapper)
self.vbox.add(self.next_piece_words)
self.vbox.add(self.next_piece_display)
self.vbox.add(self.level_display)
self.vbox.add(self.lines_display)
self.vbox.add(self.score_display)
self.show_all()
def on_key_down(self, widget, event):
key = event.keyval
if key == gtk.keysyms.Left:
self.board.move_curr_piece((-1, 0))
elif key == gtk.keysyms.Up:
self.board.rotate_curr_piece()
elif key == gtk.keysyms.Right:
self.board.move_curr_piece((1, 0))
elif key == gtk.keysyms.Down:
self.board.move_curr_piece((0, 1), point=True)
elif key == gtk.keysyms.space:
self.board.drop_curr_piece()
def styled_set_label_text(label, text):
"""Set the text of a gtk.Label with the preferred markup scheme. (Simple
enough not to be worth extending gtk.Label just for this method.)"""
front = "<b><span foreground='#AAAAAA' size='large'>"
end = "</span></b>"
label.set_markup(front+text+end)
if __name__ == "__main__":
Main()
gtk.main() | self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()): | random_line_split |
tetris.py | """This is the main file for the Pytris project. The three concrete classes
defined herein are
Board: generally controls the flow of the game, e.g. interacting with the
classes defined in tetris_pieces.py to determine whether and how pieces
get moved around the board. Also responsible for displaying the state of
the board.
NextPieceDisplay: is responsible for creating and displaying the next piece.
Main: a window containing a Board, a NextPieceDisplay, and other components
relevant to the game state. The Board actually controls what happens to
these components during game play.
Also defines an abstract class SquarePainter (extended by both Board and
NextPieceDisplay), and a convenience function styled_set_label_text.
@author Quinn Maurmann
"""
import pygtk
pygtk.require("2.0")
import cairo
import glib
import gtk
import random
import tetris_pieces
tuple_add = tetris_pieces.tuple_add # too useful to call by namespace
DOT_SIZE = 30
ROWS = 18
COLS = 10
class SquarePainter(gtk.DrawingArea):
"""Abstract SquarePainter class factors out the ability to paint squares
on a grid. Extended by both the Board and NextPieceDisplay classes."""
def paint_square(self, pos, color, cr):
"""Paints a square on the grid at a particular (int, int) position.
Color is given as an RGB triple (of floats between 0 and 1); cr is the
Cairo context. Used only in the expose methods of Board and
NextPieceDisplay"""
cr.set_source_rgb(*color)
i, j = pos
cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
cr.fill()
class Board(SquarePainter):
"""Board is responsible for handling all game logic and displaying
state."""
def __init__(self, next_piece_display, level_display, lines_display,
score_display):
super(Board, self).__init__()
self.set_size_request(COLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
break
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color
self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()):
self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def clear_rows(self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
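# (Illustrative example, not part of the original code: if full_rows == [16, 18],
#  then drop[15] == 2 and drop[17] == 1, so every surviving square above both full
#  rows falls two places while the row between them falls one place.)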
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
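# (Illustrative arithmetic, not part of the original code: clearing d == 4 lines at
#  level 2 grants 2 * 1200 == 2400 points; the check below then raises the level once
#  the total line count reaches the next multiple of 10, e.g. 1 < 10 // 10 + 1.)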
if self.level < self.lines // 10 + 1:
self.increment_level()
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
|
def on_key_down(self, widget, event):
key = event.keyval
if key == gtk.keysyms.Left:
self.board.move_curr_piece((-1, 0))
elif key == gtk.keysyms.Up:
self.board.rotate_curr_piece()
elif key == gtk.keysyms.Right:
self.board.move_curr_piece((1, 0))
elif key == gtk.keysyms.Down:
self.board.move_curr_piece((0, 1), point=True)
elif key == gtk.keysyms.space:
self.board.drop_curr_piece()
def styled_set_label_text(label, text):
"""Set the text of a gtk.Label with the preferred markup scheme. (Simple
enough not to be worth extending gtk.Label just for this method.)"""
front = "<b><span foreground='#AAAAAA' size='large'>"
end = "</span></b>"
label.set_markup(front+text+end)
if __name__ == "__main__":
Main()
gtk.main()
| super(Main, self).__init__()
self.set_title("Tetris")
self.set_resizable(False)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.connect("key-press-event", self.on_key_down)
### Create and reformat labels ###
self.next_piece_words = gtk.Label("Undefined")
self.level_display = gtk.Label("Undefined")
self.lines_display = gtk.Label("Undefined")
self.score_display = gtk.Label("Undefined")
self.next_piece_words.set_alignment(.2, .4)
self.level_display.set_alignment(.2, 0)
self.lines_display.set_alignment(.2, 0)
self.score_display.set_alignment(.2, 0)
styled_set_label_text(self.next_piece_words, "Next Piece:")
### Note: Board automatically fixes other three labels ###
self.next_piece_display = NextPieceDisplay()
self.board = Board(self.next_piece_display, self.level_display,
self.lines_display, self.score_display)
self.hbox = gtk.HBox() # split screen into 2 panels
self.add(self.hbox)
self.hbox.add(self.board) # left panel is Board
self.vbox = gtk.VBox() # right panel has everything else in a VBox
### Have to wrap VBox in EventBox to change BG color ###
self.vbox_wrapper = gtk.EventBox()
self.vbox_wrapper.add(self.vbox)
self.vbox_wrapper.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.Color(0.05, 0.05, 0.05))
self.hbox.add(self.vbox_wrapper)
self.vbox.add(self.next_piece_words)
self.vbox.add(self.next_piece_display)
self.vbox.add(self.level_display)
self.vbox.add(self.lines_display)
self.vbox.add(self.score_display)
self.show_all() | identifier_body |
SPACE-BLASTER-FINAL.py | # PREET PANCHAL & TIRTH PATEL
# ICS3U1-01
# MRS. RUBINI-LAFOREST
# WOBURN COLLEGIATE INSTITUTE
# JUNE 9th, 2017
"""
WORKS CITED:
- ALL screens (Start, instructions, credits, & game over screens) are designed
and created on https://www.canva.com/
- Star Background animation help:
http://programarcadegames.com/python_examples/show_file.php?file=animating_snow.py
- Meteor Image: http://falloutequestria.wikia.com/wiki/File:CM_-_Midnight_Shower.png
- Instrumental Music: https://www.youtube.com/watch?v=plXGctq9OXo
- checkCollision function used from Mrs. Rubini
"""
"""
This program is a game called 'Space Blaster'. This game is a multiplayer game that consists
of two spaceships; one blue and the other red. There is a solid green line in the middle
splitting the two player sides respectively. The game's objective is simply to destroy as many
meteors as you can by shooting at them. The shooting is automatic, and all the users have to do is
move 'left' or 'right' using the appropriate keys. For every meteor hit, you earn 10pts. Once
the 90 second timer comes to an end, a winner is selected based on the final score.
"""
# Import a library of functions called 'pygame', 'random' & 'sys'
import pygame, random, sys
stdout = sys.__stdout__
stderr = sys.__stderr__
# Initialize the game engine
pygame.init()
# Frames Per Second (FPS)
FPS = 60
# Import colours
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
RED_FADE = [250, 219, 216]
GREEN = [0, 255, 0]
BLUE_FADE = [214, 234, 248]
YELLOW = [255, 255, 0]
# Set the height and width of the screen
SCREEN_SIZE = [1200, 900]
# Empty lists for moving objects
meteor_list_blue = []
meteor_list_red = []
star_list = []
# Initialize the game clock
clock = pygame.time.Clock()
# Displaying Screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
# Displays window title
pygame.display.set_caption("SPACE BLASTER")
# Importing all images
blue_spaceship = pygame.image.load('Spaceship1.png')
red_spaceship = pygame.image.load('Spaceship2.png')
meteor_image = pygame.image.load('Meteor.png')
start_screen = pygame.image.load("Start Screen.png")
instruction_screen = pygame.image.load("Instructions Screen.png")
credits_screen = pygame.image.load("Credits Screen.png")
blue_wins = pygame.image.load("Blue Wins.png")
red_wins = pygame.image.load("Red Wins.png")
tie_game = pygame.image.load("Tie Game.png")
|
# For-loop appending coordinates for the meteors
# on blue spaceship side
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.append([x_meteor_blue, 0])
# Blue meteor width & height values
blue_meteorw = 30
blue_meteorh = 30
# Function for displaying blue spaceship
def BLUE(x_change_blue, y_change_blue):
screen.blit(blue_spaceship, (x_change_blue, y_change_blue))
# Variables controlling blue spaceship
x_coord_blue = 0
y_coord_blue = 775
# For-loop appending coordinates for the meteors
# on red spaceship side
for i in range(10):
x_meteor_red = random.randrange(620, 1155)
y_meteor_red = 0
meteor_list_red.append([x_meteor_red, y_meteor_red])
# Red meteor width & height values
red_meteorw = 30
red_meteorh = 30
# Function for displaying red spaceship
def RED(x_change_red, y_change_red):
screen.blit(red_spaceship, (x_change_red, y_change_red))
# Variables controlling red spaceship
x_coord_red = 1110
y_coord_red = 775
# For-loop appending coordinates for the white stars
# on game screen
for stars in range(50):
x_star = random.randrange(0, 1200)
y_star = random.randrange(0, 900)
star_list.append([x_star, y_star])
# Variables for bullets on blue side
startX_blue = 45
startY_blue = 773
Xchange_bullet_blue = 0
bullets_blue = [[startX_blue, startY_blue]]
blue_bulletw = 3
blue_bulleth = 10
# Variables for bullets on red side
startX_red = 1155
startY_red = 773
Xchange_bullet_red = 0
bullets_red = [[startX_red, startY_red]]
red_bulletw = 3
red_bulleth = 10
# COLLISION DETECTION Function
def checkCollision(obj1x, obj1y, obj1w, obj1h, obj2x, obj2y, obj2w, obj2h):
# check bounding box
if obj1x + obj1w >= obj2x and obj1x <= obj2x + obj2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
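# (Illustrative note, not part of the original file: checkCollision is a standard
#  axis-aligned bounding-box test -- two rectangles collide exactly when their
#  x-ranges and y-ranges both overlap. Assuming the signature above:
#    checkCollision(0, 0, 10, 10, 5, 5, 10, 10)   -> True  (boxes overlap)
#    checkCollision(0, 0, 10, 10, 30, 30, 10, 10) -> False (boxes are apart))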
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing game timer (set to 90 seconds)
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen bliting of different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# When a key is released, stop the corresponding spaceship's movement
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play the music file; music timer now = 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates them on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
# Displays meteors on blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Displays meteors on red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert new meteor once one is done one cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert new meteor once one is done one cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += 3
startX_blue += 3
startX_blue += 3
if startX_blue >= 550:
startX_blue -= 3
startX_blue -= 3
startX_blue -= 3
# Synchronizes Blue spaceship with bullets
x_coord_blue += Xchange_bullet_blue
BLUE(x_coord_blue, y_coord_blue)
# Controls movement of bullets on blue side
startX_blue += Xchange_bullet_blue
# Move all bullets 3px
for bullet in bullets_blue:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_blue[len(bullets_blue) - 1][1] < 0:
bullets_blue.remove(bullets_blue[len(bullets_blue) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_blue[0][1] + 70 < startY_blue:
bullets_blue.insert(0, [startX_blue, startY_blue])
# Blue spaceship restrictions on game screen
if x_coord_blue <= 0:
x_coord_blue += 3
x_coord_blue += 3
x_coord_blue += 3
if x_coord_blue >= 502:
x_coord_blue -= 3
x_coord_blue -= 3
x_coord_blue -= 3
# Displays bullets on blue side and draws it as Yellow rectangles
for bullet in bullets_blue:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for blue player
blue_player(score_blue)
# Collision detection for bullets and meteors on blue side
for bullet in bullets_blue:
if checkCollision(bullet[0], bullet[1], blue_bulletw, blue_meteorh, meteor_list_blue[0][0],
meteor_list_blue[0][1], blue_meteorw, blue_meteorh):
meteor_list_blue.remove(meteor_list_blue[0])
score_blue += 10
if meteor_list_blue != 0:
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.insert(0, [x_meteor_blue, 0])
# Restrictions for bullets on red side
if startX_red <= 646:
startX_red += 3
startX_red += 3
startX_red += 3
if startX_red >= 1157:
startX_red -= 3
startX_red -= 3
startX_red -= 3
# Synchronizes Red spaceship with bullets
x_coord_red += Xchange_bullet_red
RED(x_coord_red, y_coord_red)
# Controls movement of bullets on red side
startX_red += Xchange_bullet_red
# Move all bullets 3px
for bullet in bullets_red:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_red[len(bullets_red) - 1][1] < 0:
bullets_red.remove(bullets_red[len(bullets_red) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_red[0][1] + 70 < startY_red:
bullets_red.insert(0, [startX_red, startY_red])
# Red spaceship restrictions on game screen
if x_coord_red <= 602:
x_coord_red += 3
x_coord_red += 3
x_coord_red += 3
if x_coord_red >= 1112:
x_coord_red -= 3
x_coord_red -= 3
x_coord_red -= 3
# Displays bullets on red side and draws it as Yellow rectangles
for bullet in bullets_red:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for red player
red_player(score_red)
# Collision detection for bullets and meteors on red side
for bullet in bullets_red:
if checkCollision(bullet[0], bullet[1], red_bulletw, red_meteorh, meteor_list_red[0][0], meteor_list_red[0][1],
red_meteorw, red_meteorh):
meteor_list_red.remove(meteor_list_red[0])
score_red += 10
if meteor_list_red != 0:
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.insert(0, [x_meteor_red, 0])
# Game timer countdown from 90
game_timer -= 0.020
if game_timer < 0:
game = True
print "GAME OVER."
print ""
# Displaying game timer on game screen
font_game_timer = pygame.font.SysFont('monospace', 35, True, False)
game_timer_text = font_game_timer.render(str(int(game_timer)), True, WHITE)
screen.blit(game_timer_text, [575, 10])
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Music timer increment by 1
music_timer += 1
# Controls the speed of the meteors falling
meteor_list_blue[0][1] += 7
meteor_list_red[0][1] += 7
# Game clock tick set to 60 to run game
clock.tick(FPS)
# --- Game Over Event Loop---
game_over_timer = 3
game_over = False
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
pygame.quit()
sys.exit()
# Once game over timer reaches 0, display the following:
game_over_timer -= 0.5
if game_over_timer == 0:
# Depending on the final game score, a winner is chosen and score + result is printed
if score_red > score_blue:
screen.blit(red_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: RED WINS!"
print "*-" * 100
if score_blue > score_red:
screen.blit(blue_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: BLUE WINS!"
print "*-" * 100
if score_red == score_blue:
screen.blit(tie_game, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: TIE GAME!"
print "*-" * 100
# Flip pygame screen to display everything
pygame.display.flip()
# Game Clock set to 60 Frames per second
clock.tick(FPS)
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
# Complete exit and end of game code
sys.exit()
# Thank you for playing our game! Hope you enjoyed it! | random_line_split |
|
SPACE-BLASTER-FINAL.py | # PREET PANCHAL & TIRTH PATEL
# ICS3U1-01
# MRS. RUBINI-LAFOREST
# WOBURN COLLEGIATE INSTITUTE
# JUNE 9th, 2017
"""
WORKS CITED:
- ALL screens (Start, instructions, credits, & game over screens) are designed
and created on https://www.canva.com/
- Star Background animation help:
http://programarcadegames.com/python_examples/show_file.php?file=animating_snow.py
- Meteor Image: http://falloutequestria.wikia.com/wiki/File:CM_-_Midnight_Shower.png
- Instrumental Music: https://www.youtube.com/watch?v=plXGctq9OXo
- checkCollision function used from Mrs. Rubini
"""
"""
This program is a game called 'Space Blaster'. This game is a multiplayer game that consists
of two spaceships; one blue and the other red. There is a solid green line in the middle
splitting the two player sides respectively. The game's objective is simply to destroy as many
meteors as you can by shooting at them. The shooting is automatic, and all the users have to do is
move 'left' or 'right' using the appropriate keys. For every meteor hit, you earn 10pts. Once
the 90 second timer comes to an end, a winner is selected based on the final score.
"""
# Import a library of functions called 'pygame', 'random' & 'sys'
import pygame, random, sys
stdout = sys.__stdout__
stderr = sys.__stderr__
# Initialize the game engine
pygame.init()
# Frames Per Second (FPS)
FPS = 60
# Import colours
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
RED_FADE = [250, 219, 216]
GREEN = [0, 255, 0]
BLUE_FADE = [214, 234, 248]
YELLOW = [255, 255, 0]
# Set the height and width of the screen
SCREEN_SIZE = [1200, 900]
# Empty lists for moving objects
meteor_list_blue = []
meteor_list_red = []
star_list = []
# Initialize the game clock
clock = pygame.time.Clock()
# Displaying Screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
# Displays window title
pygame.display.set_caption("SPACE BLASTER")
# Importing all images
blue_spaceship = pygame.image.load('Spaceship1.png')
red_spaceship = pygame.image.load('Spaceship2.png')
meteor_image = pygame.image.load('Meteor.png')
start_screen = pygame.image.load("Start Screen.png")
instruction_screen = pygame.image.load("Instructions Screen.png")
credits_screen = pygame.image.load("Credits Screen.png")
blue_wins = pygame.image.load("Blue Wins.png")
red_wins = pygame.image.load("Red Wins.png")
tie_game = pygame.image.load("Tie Game.png")
# For-loop appending coordinates for the meteors
# on blue spaceship side
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.append([x_meteor_blue, 0])
# Blue meteor width & height values
blue_meteorw = 30
blue_meteorh = 30
# Function for displaying blue spaceship
def BLUE(x_change_blue, y_change_blue):
screen.blit(blue_spaceship, (x_change_blue, y_change_blue))
# Variables controlling blue spaceship
x_coord_blue = 0
y_coord_blue = 775
# For-loop appending coordinates for the meteors
# on red spaceship side
for i in range(10):
x_meteor_red = random.randrange(620, 1155)
y_meteor_red = 0
meteor_list_red.append([x_meteor_red, y_meteor_red])
# Red meteor width & height values
red_meteorw = 30
red_meteorh = 30
# Function for displaying red spaceship
def RED(x_change_red, y_change_red):
screen.blit(red_spaceship, (x_change_red, y_change_red))
# Variables controlling red spaceship
x_coord_red = 1110
y_coord_red = 775
# For-loop appending coordinates for the white stars
# on game screen
for stars in range(50):
x_star = random.randrange(0, 1200)
y_star = random.randrange(0, 900)
star_list.append([x_star, y_star])
# Variables for bullets on blue side
startX_blue = 45
startY_blue = 773
Xchange_bullet_blue = 0
bullets_blue = [[startX_blue, startY_blue]]
blue_bulletw = 3
blue_bulleth = 10
# Variables for bullets on red side
startX_red = 1155
startY_red = 773
Xchange_bullet_red = 0
bullets_red = [[startX_red, startY_red]]
red_bulletw = 3
red_bulleth = 10
# COLLISION DETECTION Function
def checkCollision(obj1x, obj1y, obj1w, obj1h, obj2x, obj2y, obj2w, obj2h):
# check bounding box
if obj1x + obj1w >= obj2x and obj1x <= obj2x + obj2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
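# (Illustrative note, not part of the original file: checkCollision is a standard
#  axis-aligned bounding-box test -- two rectangles collide exactly when their
#  x-ranges and y-ranges both overlap. Assuming the signature above:
#    checkCollision(0, 0, 10, 10, 5, 5, 10, 10)   -> True  (boxes overlap)
#    checkCollision(0, 0, 10, 10, 30, 30, 10, 10) -> False (boxes are apart))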
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing game timer (set to 90 seconds)
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen bliting of different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# When a key is released, stop the corresponding spaceship's movement
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play the music file; music timer now = 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates them on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
# Displays meteors on blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Displays meteors on red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert a new meteor once the previous one has completed its cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert a new meteor once the previous one has completed its cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += 3
startX_blue += 3
startX_blue += 3
if startX_blue >= 550:
startX_blue -= 3
startX_blue -= 3
startX_blue -= 3
# Synchronizes Blue spaceship with bullets
x_coord_blue += Xchange_bullet_blue
BLUE(x_coord_blue, y_coord_blue)
# Controls movement of bullets on blue side
startX_blue += Xchange_bullet_blue
# Move all bullets 3px
for bullet in bullets_blue:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_blue[len(bullets_blue) - 1][1] < 0:
bullets_blue.remove(bullets_blue[len(bullets_blue) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_blue[0][1] + 70 < startY_blue:
bullets_blue.insert(0, [startX_blue, startY_blue])
# Blue spaceship restrictions on game screen
if x_coord_blue <= 0:
x_coord_blue += 3
x_coord_blue += 3
x_coord_blue += 3
if x_coord_blue >= 502:
x_coord_blue -= 3
x_coord_blue -= 3
x_coord_blue -= 3
# Displays bullets on blue side and draws it as Yellow rectangles
for bullet in bullets_blue:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for blue player
blue_player(score_blue)
# Collision detection for bullets and meteors on blue side
for bullet in bullets_blue:
if checkCollision(bullet[0], bullet[1], blue_bulletw, blue_meteorh, meteor_list_blue[0][0],
meteor_list_blue[0][1], blue_meteorw, blue_meteorh):
meteor_list_blue.remove(meteor_list_blue[0])
score_blue += 10
if meteor_list_blue != 0:
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.insert(0, [x_meteor_blue, 0])
# Restrictions for bullets on red side
if startX_red <= 646:
|
if startX_red >= 1157:
startX_red -= 3
startX_red -= 3
startX_red -= 3
# Synchronizes Red spaceship with bullets
x_coord_red += Xchange_bullet_red
RED(x_coord_red, y_coord_red)
# Controls movement of bullets on red side
startX_red += Xchange_bullet_red
# Move all bullets 3px
for bullet in bullets_red:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_red[len(bullets_red) - 1][1] < 0:
bullets_red.remove(bullets_red[len(bullets_red) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_red[0][1] + 70 < startY_red:
bullets_red.insert(0, [startX_red, startY_red])
# Red spaceship restrictions on game screen
if x_coord_red <= 602:
x_coord_red += 3
x_coord_red += 3
x_coord_red += 3
if x_coord_red >= 1112:
x_coord_red -= 3
x_coord_red -= 3
x_coord_red -= 3
# Displays bullets on red side and draws it as Yellow rectangles
for bullet in bullets_red:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for red player
red_player(score_red)
# Collision detection for bullets and meteors on red side
for bullet in bullets_red:
if checkCollision(bullet[0], bullet[1], red_bulletw, red_meteorh, meteor_list_red[0][0], meteor_list_red[0][1],
red_meteorw, red_meteorh):
meteor_list_red.remove(meteor_list_red[0])
score_red += 10
if meteor_list_red != 0:
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.insert(0, [x_meteor_red, 0])
# Game timer countdown from 90
game_timer -= 0.020
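# At the 60 FPS cap this subtracts about 1.2 per second, so the displayed 90
# counts down in roughly 75 seconds of real time.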
if game_timer < 0:
game = True
print "GAME OVER."
print ""
# Displaying game timer on game screen
font_game_timer = pygame.font.SysFont('monospace', 35, True, False)
game_timer_text = font_game_timer.render(str(int(game_timer)), True, WHITE)
screen.blit(game_timer_text, [575, 10])
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Music timer increment by 1
music_timer += 1
# Controls the speed of the meteors falling
meteor_list_blue[0][1] += 7
meteor_list_red[0][1] += 7
# Game clock tick set to 60 to run game
clock.tick(FPS)
# --- Game Over Event Loop---
game_over_timer = 3
game_over = False
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
pygame.quit()
sys.exit()
# Once game over timer reaches 0, display the following:
game_over_timer -= 0.5
if game_over_timer == 0:
# Depending on the final game score, a winner is chosen and score + result is printed
if score_red > score_blue:
screen.blit(red_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: RED WINS!"
print "*-" * 100
if score_blue > score_red:
screen.blit(blue_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: BLUE WINS!"
print "*-" * 100
if score_red == score_blue:
screen.blit(tie_game, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: TIE GAME!"
print "*-" * 100
# Flip pygame screen to display everything
pygame.display.flip()
# Game Clock set to 60 Frames per second
clock.tick(FPS)
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
# Complete exit and end of game code
sys.exit()
# Thank you for playing our game! Hope you enjoyed it! | startX_red += 3
startX_red += 3
startX_red += 3 | conditional_block |
SPACE-BLASTER-FINAL.py | # PREET PANCHAL & TIRTH PATEL
# ICS3U1-01
# MRS. RUBINI-LAFOREST
# WOBURN COLLEGIATE INSTITUTE
# JUNE 9th, 2017
"""
WORKS CITED:
- ALL screens(Start, instructions, credits, & game over screens) are designed
and created on https://www.canva.com/
- Star Background animation help:
http://programarcadegames.com/python_examples/show_file.php?file=animating_snow.py
- Meteor Image: http://falloutequestria.wikia.com/wiki/File:CM_-_Midnight_Shower.png
- Instrumental Music: https://www.youtube.com/watch?v=plXGctq9OXo
- checkCollision function provided by Mrs. Rubini
"""
"""
This program is a game called 'Space Blaster'. This game is a multiplayer game that consists
of two spaceships; one blue and the other red. There is a solid green line in the middle
splitting the two player sides respectively. The game's objective is to destroy as many
meteors as you can by shooting them. The shooting is automatic and all the users have to do is
move 'left' or 'right' using the appropriate keys. For every meteor hit, you earn 10pts. Once
the 90 second timer comes to an end, a winner is selected based on the final score.
"""
# Import a library of functions called 'pygame', 'random' & 'sys'
import pygame, random, sys
stdout = sys.__stdout__
stderr = sys.__stderr__
# Initialize the game engine
pygame.init()
# Frames Per Second (FPS)
FPS = 60
# Import colours
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
RED_FADE = [250, 219, 216]
GREEN = [0, 255, 0]
BLUE_FADE = [214, 234, 248]
YELLOW = [255, 255, 0]
# Set the height and width of the screen
SCREEN_SIZE = [1200, 900]
# Empty lists for moving objects
meteor_list_blue = []
meteor_list_red = []
star_list = []
# Initialize the game clock
clock = pygame.time.Clock()
# Displaying Screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
# Displays window title
pygame.display.set_caption("SPACE BLASTER")
# Importing all images
blue_spaceship = pygame.image.load('Spaceship1.png')
red_spaceship = pygame.image.load('Spaceship2.png')
meteor_image = pygame.image.load('Meteor.png')
start_screen = pygame.image.load("Start Screen.png")
instruction_screen = pygame.image.load("Instructions Screen.png")
credits_screen = pygame.image.load("Credits Screen.png")
blue_wins = pygame.image.load("Blue Wins.png")
red_wins = pygame.image.load("Red Wins.png")
tie_game = pygame.image.load("Tie Game.png")
# Appending the initial coordinates for the meteor
# on the blue spaceship side
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.append([x_meteor_blue, 0])
# Blue meteor width & height values
blue_meteorw = 30
blue_meteorh = 30
# Function for displaying blue spaceship
def BLUE(x_change_blue, y_change_blue):
screen.blit(blue_spaceship, (x_change_blue, y_change_blue))
# Variables controlling blue spaceship
x_coord_blue = 0
y_coord_blue = 775
# For-loop appending coordinates for the meteors
# on red spaceship side
for i in range(10):
x_meteor_red = random.randrange(620, 1155)
y_meteor_red = 0
meteor_list_red.append([x_meteor_red, y_meteor_red])
# Red meteor width & height values
red_meteorw = 30
red_meteorh = 30
# Function for displaying red spaceship
def RED(x_change_red, y_change_red):
screen.blit(red_spaceship, (x_change_red, y_change_red))
# Variables controlling red spaceship
x_coord_red = 1110
y_coord_red = 775
# For-loop appending coordinates for the white stars
# on game screen
for stars in range(50):
x_star = random.randrange(0, 1200)
y_star = random.randrange(0, 900)
star_list.append([x_star, y_star])
# Variables for bullets on blue side
startX_blue = 45
startY_blue = 773
Xchange_bullet_blue = 0
bullets_blue = [[startX_blue, startY_blue]]
blue_bulletw = 3
blue_bulleth = 10
# Variables for bullets on red side
startX_red = 1155
startY_red = 773
Xchange_bullet_red = 0
bullets_red = [[startX_red, startY_red]]
red_bulletw = 3
red_bulleth = 10
# COLLISION DETECTION Function
def checkCollision(obj1x, obj1y, obj1w, obj1h, obj2x, obj2y, obj2w, obj2h):
# check bounding box
if obj1x + obj1w >= obj2x and obj1x <= obj2x + obj2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
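# Worked example with hypothetical values: a 3x10 bullet at (100, 500) and a
# 30x30 meteor at (90, 480) overlap on both axes (103 >= 90, 100 <= 120 and
# 510 >= 480, 500 <= 510), so checkCollision(100, 500, 3, 10, 90, 480, 30, 30)
# returns True.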
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
|
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing the 90-second game timer
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen blitting of the different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# When a movement key is released, the corresponding spaceship stops moving
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play the music file; music timer is then set to 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates it on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
# Moves the meteors down the blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Moves the meteors down the red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert a new meteor once the previous one has completed its cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert a new meteor once the previous one has completed its cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += 3
startX_blue += 3
startX_blue += 3
if startX_blue >= 550:
startX_blue -= 3
startX_blue -= 3
startX_blue -= 3
# Synchronizes Blue spaceship with bullets
x_coord_blue += Xchange_bullet_blue
BLUE(x_coord_blue, y_coord_blue)
# Controls movement of bullets on blue side
startX_blue += Xchange_bullet_blue
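# The bullets form an automatic stream kept as a list of [x, y] positions: as
# shown below, every bullet rises 3px per frame, the last one is dropped once
# it leaves the top of the screen, and a fresh one is added at the ship's
# position once the newest bullet has climbed more than 70px.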
# Move all bullets 3px
for bullet in bullets_blue:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_blue[len(bullets_blue) - 1][1] < 0:
bullets_blue.remove(bullets_blue[len(bullets_blue) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_blue[0][1] + 70 < startY_blue:
bullets_blue.insert(0, [startX_blue, startY_blue])
# Blue spaceship restrictions on game screen
if x_coord_blue <= 0:
x_coord_blue += 3
x_coord_blue += 3
x_coord_blue += 3
if x_coord_blue >= 502:
x_coord_blue -= 3
x_coord_blue -= 3
x_coord_blue -= 3
# Displays bullets on blue side and draws it as Yellow rectangles
for bullet in bullets_blue:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for blue player
blue_player(score_blue)
# Collision detection for bullets and meteors on blue side
for bullet in bullets_blue:
if checkCollision(bullet[0], bullet[1], blue_bulletw, blue_meteorh, meteor_list_blue[0][0],
meteor_list_blue[0][1], blue_meteorw, blue_meteorh):
meteor_list_blue.remove(meteor_list_blue[0])
score_blue += 10
if meteor_list_blue != 0:
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.insert(0, [x_meteor_blue, 0])
# Restrictions for bullets on red side
if startX_red <= 646:
startX_red += 3
startX_red += 3
startX_red += 3
if startX_red >= 1157:
startX_red -= 3
startX_red -= 3
startX_red -= 3
# Synchronizes Red spaceship with bullets
x_coord_red += Xchange_bullet_red
RED(x_coord_red, y_coord_red)
# Controls movement of bullets on red side
startX_red += Xchange_bullet_red
# Move all bullets 3px
for bullet in bullets_red:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_red[len(bullets_red) - 1][1] < 0:
bullets_red.remove(bullets_red[len(bullets_red) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_red[0][1] + 70 < startY_red:
bullets_red.insert(0, [startX_red, startY_red])
# Red spaceship restrictions on game screen
if x_coord_red <= 602:
x_coord_red += 3
x_coord_red += 3
x_coord_red += 3
if x_coord_red >= 1112:
x_coord_red -= 3
x_coord_red -= 3
x_coord_red -= 3
# Displays bullets on red side and draws it as Yellow rectangles
for bullet in bullets_red:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for red player
red_player(score_red)
# Collision detection for bullets and meteors on red side
for bullet in bullets_red:
if checkCollision(bullet[0], bullet[1], red_bulletw, red_meteorh, meteor_list_red[0][0], meteor_list_red[0][1],
red_meteorw, red_meteorh):
meteor_list_red.remove(meteor_list_red[0])
score_red += 10
if meteor_list_red != 0:
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.insert(0, [x_meteor_red, 0])
# Game timer countdown from 90
game_timer -= 0.020
if game_timer < 0:
game = True
print "GAME OVER."
print ""
# Displaying game timer on game screen
font_game_timer = pygame.font.SysFont('monospace', 35, True, False)
game_timer_text = font_game_timer.render(str(int(game_timer)), True, WHITE)
screen.blit(game_timer_text, [575, 10])
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Music timer increment by 1
music_timer += 1
# Controls the speed of the meteors falling
meteor_list_blue[0][1] += 7
meteor_list_red[0][1] += 7
# Game clock tick set to 60 to run game
clock.tick(FPS)
# --- Game Over Event Loop---
game_over_timer = 3
game_over = False
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
pygame.quit()
sys.exit()
# Once game over timer reaches 0, display the following:
game_over_timer -= 0.5
if game_over_timer == 0:
# Depending on the final game score, a winner is chosen and score + result is printed
if score_red > score_blue:
screen.blit(red_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: RED WINS!"
print "*-" * 100
if score_blue > score_red:
screen.blit(blue_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: BLUE WINS!"
print "*-" * 100
if score_red == score_blue:
screen.blit(tie_game, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: TIE GAME!"
print "*-" * 100
# Flip pygame screen to display everything
pygame.display.flip()
# Game Clock set to 60 Frames per second
clock.tick(FPS)
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
# Complete exit and end of game code
sys.exit()
# Thank you for playing our game! Hope you enjoyed it! | font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red | identifier_body |
SPACE-BLASTER-FINAL.py | # PREET PANCHAL & TIRTH PATEL
# ICS3U1-01
# MRS. RUBINI-LAFOREST
# WOBURN COLLEGIATE INSTITUTE
# JUNE 9th, 2017
"""
WORKS CITED:
- ALL screens(Start, instructions, credits, & game over screens) are designed
and created on https://www.canva.com/
- Star Background animation help:
http://programarcadegames.com/python_examples/show_file.php?file=animating_snow.py
- Meteor Image: http://falloutequestria.wikia.com/wiki/File:CM_-_Midnight_Shower.png
- Instrumental Music: https://www.youtube.com/watch?v=plXGctq9OXo
- checkCollision function provided by Mrs. Rubini
"""
"""
This program is a game called 'Space Blaster'. This game is a multiplayer game that consists
of two spaceships; one blue and the other red. There is a solid green line in the middle
splitting the two player sides respectively. The game's objective is to destroy as many
meteors as you can by shooting them. The shooting is automatic and all the users have to do is
move 'left' or 'right' using the appropriate keys. For every meteor hit, you earn 10pts. Once
the 90 second timer comes to an end, a winner is selected based on the final score.
"""
# Import a library of functions called 'pygame', 'random' & 'sys'
import pygame, random, sys
stdout = sys.__stdout__
stderr = sys.__stderr__
# Initialize the game engine
pygame.init()
# Frames Per Second (FPS)
FPS = 60
# Import colours
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
RED_FADE = [250, 219, 216]
GREEN = [0, 255, 0]
BLUE_FADE = [214, 234, 248]
YELLOW = [255, 255, 0]
# Set the height and width of the screen
SCREEN_SIZE = [1200, 900]
# Empty lists for moving objects
meteor_list_blue = []
meteor_list_red = []
star_list = []
# Initialize the game clock
clock = pygame.time.Clock()
# Displaying Screen size
screen = pygame.display.set_mode(SCREEN_SIZE)
# Displays window title
pygame.display.set_caption("SPACE BLASTER")
# Importing all images
blue_spaceship = pygame.image.load('Spaceship1.png')
red_spaceship = pygame.image.load('Spaceship2.png')
meteor_image = pygame.image.load('Meteor.png')
start_screen = pygame.image.load("Start Screen.png")
instruction_screen = pygame.image.load("Instructions Screen.png")
credits_screen = pygame.image.load("Credits Screen.png")
blue_wins = pygame.image.load("Blue Wins.png")
red_wins = pygame.image.load("Red Wins.png")
tie_game = pygame.image.load("Tie Game.png")
# Appending the initial coordinates for the meteor
# on the blue spaceship side
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.append([x_meteor_blue, 0])
# Blue meteor width & height values
blue_meteorw = 30
blue_meteorh = 30
# Function for displaying blue spaceship
def BLUE(x_change_blue, y_change_blue):
screen.blit(blue_spaceship, (x_change_blue, y_change_blue))
# Variables controlling blue spaceship
x_coord_blue = 0
y_coord_blue = 775
# For-loop appending coordinates for the meteors
# on red spaceship side
for i in range(10):
x_meteor_red = random.randrange(620, 1155)
y_meteor_red = 0
meteor_list_red.append([x_meteor_red, y_meteor_red])
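# Note: the red side pre-loads ten meteor positions while the blue side starts
# with a single one; the main loop only ever draws and recycles the first
# element of each list.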
# Red meteor width & height values
red_meteorw = 30
red_meteorh = 30
# Function for displaying red spaceship
def RED(x_change_red, y_change_red):
screen.blit(red_spaceship, (x_change_red, y_change_red))
# Variables controlling red spaceship
x_coord_red = 1110
y_coord_red = 775
# For-loop appending coordinates for the white stars
# on game screen
for stars in range(50):
x_star = random.randrange(0, 1200)
y_star = random.randrange(0, 900)
star_list.append([x_star, y_star])
# Variables for bullets on blue side
startX_blue = 45
startY_blue = 773
Xchange_bullet_blue = 0
bullets_blue = [[startX_blue, startY_blue]]
blue_bulletw = 3
blue_bulleth = 10
# Variables for bullets on red side
startX_red = 1155
startY_red = 773
Xchange_bullet_red = 0
bullets_red = [[startX_red, startY_red]]
red_bulletw = 3
red_bulleth = 10
# COLLISION DETECTION Function
def | (obj1x, obj1y, obj1w, obj1h, obj2x, obj2y, obj2w, obj2h):
# check bounding box
if obj1x + obj1w >= obj2x and obj1x <= obj2x + obj2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing the 90-second game timer
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen blitting of the different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# When a movement key is released, the corresponding spaceship stops moving
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play the music file; music timer is then set to 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates it on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
# Moves the meteors down the blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Moves the meteors down the red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert a new meteor once the previous one has completed its cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert a new meteor once the previous one has completed its cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += 3
startX_blue += 3
startX_blue += 3
if startX_blue >= 550:
startX_blue -= 3
startX_blue -= 3
startX_blue -= 3
# Synchronizes Blue spaceship with bullets
x_coord_blue += Xchange_bullet_blue
BLUE(x_coord_blue, y_coord_blue)
# Controls movement of bullets on blue side
startX_blue += Xchange_bullet_blue
# Move all bullets 3px
for bullet in bullets_blue:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_blue[len(bullets_blue) - 1][1] < 0:
bullets_blue.remove(bullets_blue[len(bullets_blue) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_blue[0][1] + 70 < startY_blue:
bullets_blue.insert(0, [startX_blue, startY_blue])
# Blue spaceship restrictions on game screen
if x_coord_blue <= 0:
x_coord_blue += 3
x_coord_blue += 3
x_coord_blue += 3
if x_coord_blue >= 502:
x_coord_blue -= 3
x_coord_blue -= 3
x_coord_blue -= 3
# Displays bullets on blue side and draws it as Yellow rectangles
for bullet in bullets_blue:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for blue player
blue_player(score_blue)
# Collision detection for bullets and meteors on blue side
for bullet in bullets_blue:
if checkCollision(bullet[0], bullet[1], blue_bulletw, blue_meteorh, meteor_list_blue[0][0],
meteor_list_blue[0][1], blue_meteorw, blue_meteorh):
meteor_list_blue.remove(meteor_list_blue[0])
score_blue += 10
if meteor_list_blue != 0:
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.insert(0, [x_meteor_blue, 0])
# Restrictions for bullets on red side
if startX_red <= 646:
startX_red += 3
startX_red += 3
startX_red += 3
if startX_red >= 1157:
startX_red -= 3
startX_red -= 3
startX_red -= 3
# Synchronizes Red spaceship with bullets
x_coord_red += Xchange_bullet_red
RED(x_coord_red, y_coord_red)
# Controls movement of bullets on red side
startX_red += Xchange_bullet_red
# Move all bullets 3px
for bullet in bullets_red:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_red[len(bullets_red) - 1][1] < 0:
bullets_red.remove(bullets_red[len(bullets_red) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_red[0][1] + 70 < startY_red:
bullets_red.insert(0, [startX_red, startY_red])
# Red spaceship restrictions on game screen
if x_coord_red <= 602:
x_coord_red += 3
x_coord_red += 3
x_coord_red += 3
if x_coord_red >= 1112:
x_coord_red -= 3
x_coord_red -= 3
x_coord_red -= 3
# Displays bullets on red side and draws it as Yellow rectangles
for bullet in bullets_red:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for red player
red_player(score_red)
# Collision detection for bullets and meteors on red side
for bullet in bullets_red:
if checkCollision(bullet[0], bullet[1], red_bulletw, red_meteorh, meteor_list_red[0][0], meteor_list_red[0][1],
red_meteorw, red_meteorh):
meteor_list_red.remove(meteor_list_red[0])
score_red += 10
if meteor_list_red != 0:
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.insert(0, [x_meteor_red, 0])
# Game timer countdown from 90
game_timer -= 0.020
if game_timer < 0:
game = True
print "GAME OVER."
print ""
# Displaying game timer on game screen
font_game_timer = pygame.font.SysFont('monospace', 35, True, False)
game_timer_text = font_game_timer.render(str(int(game_timer)), True, WHITE)
screen.blit(game_timer_text, [575, 10])
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Music timer increment by 1
music_timer += 1
# Controls the speed of the meteors falling
meteor_list_blue[0][1] += 7
meteor_list_red[0][1] += 7
# Game clock tick set to 60 to run game
clock.tick(FPS)
# --- Game Over Event Loop---
game_over_timer = 3
game_over = False
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
pygame.quit()
sys.exit()
# Once game over timer reaches 0, display the following:
game_over_timer -= 0.5
if game_over_timer == 0:
# Depending on the final game score, a winner is chosen and score + result is printed
if score_red > score_blue:
screen.blit(red_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: RED WINS!"
print "*-" * 100
if score_blue > score_red:
screen.blit(blue_wins, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: BLUE WINS!"
print "*-" * 100
if score_red == score_blue:
screen.blit(tie_game, [0, 0])
print "RED SCORE: " + str(score_red)
print "BLUE SCORE: " + str(score_blue)
print "Result: TIE GAME!"
print "*-" * 100
# Flip pygame screen to display everything
pygame.display.flip()
# Game Clock set to 60 Frames per second
clock.tick(FPS)
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
# Complete exit and end of game code
sys.exit()
# Thank you for playing our game! Hope you enjoyed it! | checkCollision | identifier_name |
rust_gtest_interop.rs | // Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::pin::Pin;
/// Use `prelude::*` to get access to all macros defined in this crate.
pub mod prelude {
// The #[extern_test_suite("cplusplus::Type")] macro.
pub use gtest_attribute::extern_test_suite;
// The #[gtest(TestSuite, TestName)] macro.
pub use gtest_attribute::gtest;
// Gtest expectation macros, which should be used to verify test expectations.
// These replace the standard practice of using assert/panic in Rust tests
// which would crash the test binary.
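//
// A hypothetical test using the attribute and expectation macros re-exported
// here might look like (sketch only, not taken from this file):
//
// #[gtest(MathTest, Addition)]
// fn addition_works() {
//     expect_eq!(2 + 2, 4);
// }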
pub use crate::expect_eq;
pub use crate::expect_false; | pub use crate::expect_ge;
pub use crate::expect_gt;
pub use crate::expect_le;
pub use crate::expect_lt;
pub use crate::expect_ne;
pub use crate::expect_true;
}
// The gtest_attribute proc-macro crate makes use of small_ctor, with a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that passed to
/// C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct OpaqueTestingTest {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
}
}
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
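///
/// For example, a hypothetical input of `gen/foo/../../real/path.rs` would be
/// canonicalized to `real/path.rs`.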
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../
// ` move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory.. but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
);
}
unsafe {
_ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory,
run_test_fn,
test_suite_name,
test_name,
file,
line,
)
}
}
/// Information used to register a function pointer as a test with the C++
/// Gtest framework.
pub struct TestRegistration {
pub func: extern "C" fn(suite: Pin<&mut OpaqueTestingTest>),
// TODO(danakj): These are C string literals. Maybe we should expose that as a type
// somewhere.
pub test_suite_name: &'static [std::os::raw::c_char],
pub test_name: &'static [std::os::raw::c_char],
pub file: &'static [std::os::raw::c_char],
pub line: u32,
pub factory: GtestFactoryFunction,
}
/// Register a given test function with the C++ Gtest framework.
///
/// This function is called from static initializers. It may only be called
/// from the main thread, before main() is run. It may not panic, or
/// call anything that may panic.
pub fn register_test(r: TestRegistration) {
let line = r.line.try_into().unwrap_or(-1);
// SAFETY: The `factory` parameter to rust_gtest_add_test() must be a C++
// function that returns a `testing::Test*` disguised as a
// `OpaqueTestingTest`. The #[gtest] macro will use
// `rust_gtest_interop::rust_gtest_default_factory()` by default.
unsafe {
rust_gtest_add_test(
r.factory,
r.func,
r.test_suite_name.as_ptr(),
r.test_name.as_ptr(),
r.file.as_ptr(),
line,
)
};
}
}
mod expect_macros; | random_line_split |
|
rust_gtest_interop.rs | // Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::pin::Pin;
/// Use `prelude::*` to get access to all macros defined in this crate.
pub mod prelude {
// The #[extern_test_suite("cplusplus::Type")] macro.
pub use gtest_attribute::extern_test_suite;
// The #[gtest(TestSuite, TestName)] macro.
pub use gtest_attribute::gtest;
// Gtest expectation macros, which should be used to verify test expectations.
// These replace the standard practice of using assert/panic in Rust tests
// which would crash the test binary.
pub use crate::expect_eq;
pub use crate::expect_false;
pub use crate::expect_ge;
pub use crate::expect_gt;
pub use crate::expect_le;
pub use crate::expect_lt;
pub use crate::expect_ne;
pub use crate::expect_true;
}
// The gtest_attribute proc-macro crate makes use of small_ctor, with a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that passed to
/// C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct | {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
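//
// For example, a test body that returns `Err("boom".into())` (any error type
// convertible to `Box<dyn Error>`) is reported as "Test returned error: boom".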
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
}
}
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
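///
/// A hypothetical call site (the expect_* macros are assumed to forward here):
/// `add_failure_at(file!(), line!(), "expected lhs == rhs");`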
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../
// ` move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory.. but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
);
}
unsafe {
_ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory,
run_test_fn,
test_suite_name,
test_name,
file,
line,
)
}
}
/// Information used to register a function pointer as a test with the C++
/// Gtest framework.
pub struct TestRegistration {
pub func: extern "C" fn(suite: Pin<&mut OpaqueTestingTest>),
// TODO(danakj): These are C string literals. Maybe we should expose that as a type
// somewhere.
pub test_suite_name: &'static [std::os::raw::c_char],
pub test_name: &'static [std::os::raw::c_char],
pub file: &'static [std::os::raw::c_char],
pub line: u32,
pub factory: GtestFactoryFunction,
}
/// Register a given test function with the C++ Gtest framework.
///
/// This function is called from static initializers. It may only be called
/// from the main thread, before main() is run. It may not panic, or
/// call anything that may panic.
pub fn register_test(r: TestRegistration) {
let line = r.line.try_into().unwrap_or(-1);
// SAFETY: The `factory` parameter to rust_gtest_add_test() must be a C++
// function that returns a `testing::Test*` disguised as a
// `OpaqueTestingTest`. The #[gtest] macro will use
// `rust_gtest_interop::rust_gtest_default_factory()` by default.
unsafe {
rust_gtest_add_test(
r.factory,
r.func,
r.test_suite_name.as_ptr(),
r.test_name.as_ptr(),
r.file.as_ptr(),
line,
)
};
}
}
mod expect_macros;
| OpaqueTestingTest | identifier_name |
rust_gtest_interop.rs | // Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::pin::Pin;
/// Use `prelude::*` to get access to all macros defined in this crate.
pub mod prelude {
// The #[extern_test_suite("cplusplus::Type")] macro.
pub use gtest_attribute::extern_test_suite;
// The #[gtest(TestSuite, TestName)] macro.
pub use gtest_attribute::gtest;
// Gtest expectation macros, which should be used to verify test expectations.
// These replace the standard practice of using assert/panic in Rust tests
// which would crash the test binary.
pub use crate::expect_eq;
pub use crate::expect_false;
pub use crate::expect_ge;
pub use crate::expect_gt;
pub use crate::expect_le;
pub use crate::expect_lt;
pub use crate::expect_ne;
pub use crate::expect_true;
}
// The gtest_attribute proc-macro crate makes use of small_ctor, with a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that passed to
/// C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct OpaqueTestingTest {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
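// Note: the zero-sized `data` array plus the private `PhantomData<(*mut u8,
// std::marker::PhantomPinned)>` marker mean this type cannot be constructed
// outside this module and is neither Send, Sync, nor Unpin - the usual
// opaque-FFI-struct pattern from the Nomicon link above.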
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> |
}
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../`
// move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory, but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
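// Minimal sketch, not in the original source: a test of the stripping
// behaviour described above. The module and test names are hypothetical; the
// expected value is built with Path::join so the separator matches whatever
// make_canonical_file_path() produces on the current platform.
#[cfg(test)]
mod make_canonical_file_path_sketch {
    #[test]
    fn strips_dotdot_pairs_and_leading_dotdots() {
        // "gen" and "foo" are cancelled by two of the `..` components, and the
        // remaining `..` components up to the source tree root are dropped.
        assert_eq!(
            super::make_canonical_file_path("gen/foo/../../../../real/path.rs"),
            std::path::Path::new("real").join("path.rs").to_string_lossy()
        );
    }
}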
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
);
}
unsafe {
_ZN18rust_gtest_interop19rust_gtest_add_testEPFPN7testing4TestEPFvS2_EES4_PKcS8_S8_i(
factory,
run_test_fn,
test_suite_name,
test_name,
file,
line,
)
}
}
/// Information used to register a function pointer as a test with the C++
/// Gtest framework.
pub struct TestRegistration {
pub func: extern "C" fn(suite: Pin<&mut OpaqueTestingTest>),
// TODO(danakj): These are C string literals. Maybe we should expose that as a type
// somewhere.
pub test_suite_name: &'static [std::os::raw::c_char],
pub test_name: &'static [std::os::raw::c_char],
pub file: &'static [std::os::raw::c_char],
pub line: u32,
pub factory: GtestFactoryFunction,
}
/// Register a given test function with the C++ Gtest framework.
///
/// This function is called from static initializers. It may only be called
/// from the main thread, before main() is run. It may not panic, or
/// call anything that may panic.
pub fn register_test(r: TestRegistration) {
let line = r.line.try_into().unwrap_or(-1);
// SAFETY: The `factory` parameter to rust_gtest_add_test() must be a C++
// function that returns a `testing::Test*` disguised as a
// `OpaqueTestingTest`. The #[gtest] macro will use
// `rust_gtest_interop::rust_gtest_default_factory()` by default.
unsafe {
rust_gtest_add_test(
r.factory,
r.func,
r.test_suite_name.as_ptr(),
r.test_name.as_ptr(),
r.file.as_ptr(),
line,
)
};
}
}
mod expect_macros;
| {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
} | identifier_body |
main.rs | //This project was inspired by https://github.com/jkusner/CACBarcode/blob/master/cacbarcode.py
extern crate base_custom; use base_custom::BaseCustom;
extern crate chrono; use chrono::prelude::*;
extern crate time; use time::Duration;
fn main() {
if std::env::args().count() > 1 {
println!("For security, the barcodes should only be passed via stdin, not as arguments.");
std::process::exit(1);
}
println!("Common Access Cards have two barcodes.");
println!("One the front (PDF417), and one the back (Code39).");
println!("Get an application that can read a PDF417 barcode.");
println!("Copy and paste it into here, and I will decode it.");
println!("The decoded info will only be presented here, and will not be saved.");
println!();
use std::io::prelude::*;
let stdin = std::io::stdin();
for line in stdin.lock().lines() {
println!("{}", decode(line.unwrap()));
}
}
fn decode(data: String) -> String {
match data.len() {
18 => return decode_code39(data),
88 | 89 => return decode_pdf217(data),
_ => return format!("Incorrect barcode length: {}. Make sure to include all spaces.", data.len()),
}
}
fn decode_pdf217(data: String) -> String {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let base_time = Utc.ymd(1000, 1, 1);
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF217".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
println!("1");
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
println!("2");
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
println!("3");
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
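// Illustrative sketch, not in the original source: the 4-character date fields
// above are base-32 numbers over the alphabet "0123456789ABCDEFGHIJKLMNOPQRSTUV",
// read as a day count added to the 1 Jan 1000 epoch. Assuming the usual
// most-significant-digit-first behaviour of base_custom's decimal():
//
// let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
// let days = base32.decimal("10".to_string()); // '1' * 32 + '0' = 32
// let date = Utc.ymd(1000, 1, 1) + Duration::days(days as i64); // 2 Feb 1000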
fn decode_code39(data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap()))); | //Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active duty for training or while traveling to or from a place of duty. Event condition.".to_string(),
('1', '2') => "Discharged due to misconduct involving family member abuse. (Sponsors who are eligible for retirement.) Segment condition.".to_string(),
('1', '3') => "Granted retired pay. Event condition.".to_string(),
('1', '4') => "DoD sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '5') => "DoD non-sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '6') => "DoD sponsored overseas. Segment condition.".to_string(),
('1', '7') => "Deserter. Segment condition.".to_string(),
('1', '8') => "Discharged due to misconduct involving family member abuse. (Sponsors who are not eligible for retirement.) Segment condition.".to_string(),
('1', '9') => "Reservist who dies after receiving their 20 year letter. Event condition.".to_string(),
('2', '0') => "Transitional assistance (TA-30). Segment condition.".to_string(),
('2', '1') => "Transitional assistance (TA-Res). Segment condition.".to_string(),
('2', '2') => "Transitional assistance (TA-60). Segment condition.".to_string(),
('2', '3') => "Transitional assistance (TA-120). Segment condition.".to_string(),
('2', '4') => "Transitional assistance (SSB program). Segment condition.".to_string(),
('2', '5') => "Transitional assistance (VSI program). Segment condition.".to_string(),
('2', '6') => "Transitional assistance (composite). Segment condition.".to_string(),
('2', '7') => "Senior Executive Service (SES).".to_string(),
('2', '8') => "Emergency Essential - overseas only.".to_string(),
('2', '9') => "Emergency Essential - CONUS.".to_string(),
('3', '0') => "Emergency Essential - CONUS in living quarters, living on base, and not drawing a basic allowance for quarters, serving in an emergency essential capacity.".to_string(),
('3', '1') => "Reserve Component TA-120 Reserve Component Transition Assistance TA 120 (Jan 1, 2002 or later).".to_string(),
('3', '2') => "On MSC owned and operated vessels Deployed to foreign countries on Military Sealift Command owned and operated vessels. Segment condition.".to_string(),
('3', '3') => "Guard/Reserve Alert Notification Period.".to_string(),
('3', '4') | ('3', '5') => "Reserve Component TA-180 - 180 days TAMPS for reserve return from named contingencies.".to_string(),
('3', '6') | ('3', '7') => "TA-180 - 180 days TAMP for involuntary separation.".to_string(),
('3', '8') => "Living in Government Quarters in Guam or Puerto Rico, Living on base and not drawing an allowance for quarters in Guam or Puerto Rico.".to_string(),
('3', '9') => "Reserve Component TA-180 - TAMP - Mobilized for Contingency.".to_string(),
('4', '0') => "TA-180 TAMP - SPD Code Separation.".to_string(),
('4', '1') => "TA-180 - TAMP - Stop/Loss Separation.".to_string(),
('4', '2') => "DoD Non-Sponsored Overseas - Foreign Military personnel serving OCONUS not sponsored by DoD.".to_string(),
_ => format!("Unknown Type {}{}", pect.0, pect.1),
}
} | random_line_split |
|
main.rs | //This project was inspired by https://github.com/jkusner/CACBarcode/blob/master/cacbarcode.py
extern crate base_custom; use base_custom::BaseCustom;
extern crate chrono; use chrono::prelude::*;
extern crate time; use time::Duration;
fn main() {
if std::env::args().count() > 1 {
println!("For security, the barcodes should only be passed via stdin, not as arguments.");
std::process::exit(1);
}
println!("Common Access Cards have two barcodes.");
println!("One the front (PDF417), and one the back (Code39).");
println!("Get an application that can read a PDF417 barcode.");
println!("Copy and paste it into here, and I will decode it.");
println!("The decoded info will only be presented here, and will not be saved.");
println!();
use std::io::prelude::*;
let stdin = std::io::stdin();
for line in stdin.lock().lines() {
println!("{}", decode(line.unwrap()));
}
}
fn decode(data: String) -> String {
match data.len() {
18 => return decode_code39(data),
88 | 89 => return decode_pdf217(data),
_ => return format!("Incorrect barcode length: {}. Make sure to include all spaces.", data.len()),
}
}
fn decode_pdf217(data: String) -> String {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let base_time = Utc.ymd(1000, 1, 1);
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF217".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
println!("1");
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
println!("2");
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
println!("3");
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn | (data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active duty for training or while traveling to or from a place of duty. Event condition.".to_string(),
('1', '2') => "Discharged due to misconduct involving family member abuse. (Sponsors who are eligible for retirement.) Segment condition.".to_string(),
('1', '3') => "Granted retired pay. Event condition.".to_string(),
('1', '4') => "DoD sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '5') => "DoD non-sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '6') => "DoD sponsored overseas. Segment condition.".to_string(),
('1', '7') => "Deserter. Segment condition.".to_string(),
('1', '8') => "Discharged due to misconduct involving family member abuse. (Sponsors who are not eligible for retirement.) Segment condition.".to_string(),
('1', '9') => "Reservist who dies after receiving their 20 year letter. Event condition.".to_string(),
('2', '0') => "Transitional assistance (TA-30). Segment condition.".to_string(),
('2', '1') => "Transitional assistance (TA-Res). Segment condition.".to_string(),
('2', '2') => "Transitional assistance (TA-60). Segment condition.".to_string(),
('2', '3') => "Transitional assistance (TA-120). Segment condition.".to_string(),
('2', '4') => "Transitional assistance (SSB program). Segment condition.".to_string(),
('2', '5') => "Transitional assistance (VSI program). Segment condition.".to_string(),
('2', '6') => "Transitional assistance (composite). Segment condition.".to_string(),
('2', '7') => "Senior Executive Service (SES).".to_string(),
('2', '8') => "Emergency Essential - overseas only.".to_string(),
('2', '9') => "Emergency Essential - CONUS.".to_string(),
('3', '0') => "Emergency Essential - CONUS in living quarters, living on base, and not drawing a basic allowance for quarters, serving in an emergency essential capacity.".to_string(),
('3', '1') => "Reserve Component TA-120 Reserve Component Transition Assistance TA 120 (Jan 1, 2002 or later).".to_string(),
('3', '2') => "On MSC owned and operated vessels Deployed to foreign countries on Military Sealift Command owned and operated vessels. Segment condition.".to_string(),
('3', '3') => "Guard/Reserve Alert Notification Period.".to_string(),
('3', '4') | ('3', '5') => "Reserve Component TA-180 - 180 days TAMPS for reserve return from named contingencies.".to_string(),
('3', '6') | ('3', '7') => "TA-180 - 180 days TAMP for involuntary separation.".to_string(),
('3', '8') => "Living in Government Quarters in Guam or Puerto Rico, Living on base and not drawing an allowance for quarters in Guam or Puerto Rico.".to_string(),
('3', '9') => "Reserve Component TA-180 - TAMP - Mobilized for Contingency.".to_string(),
('4', '0') => "TA-180 TAMP - SPD Code Separation.".to_string(),
('4', '1') => "TA-180 - TAMP - Stop/Loss Separation.".to_string(),
('4', '2') => "DoD Non-Sponsored Overseas - Foreign Military personnel serving OCONUS not sponsored by DoD.".to_string(),
_ => format!("Unknown Type {}{}", pect.0, pect.1),
}
}
| decode_code39 | identifier_name |
main.rs | //This project was inspired by https://github.com/jkusner/CACBarcode/blob/master/cacbarcode.py
extern crate base_custom; use base_custom::BaseCustom;
extern crate chrono; use chrono::prelude::*;
extern crate time; use time::Duration;
fn main() {
if std::env::args().count() > 1 {
println!("For security, the barcodes should only be passed via stdin, not as arguments.");
std::process::exit(1);
}
println!("Common Access Cards have two barcodes.");
println!("One the front (PDF417), and one the back (Code39).");
println!("Get an application that can read a PDF417 barcode.");
println!("Copy and paste it into here, and I will decode it.");
println!("The decoded info will only be presented here, and will not be saved.");
println!();
use std::io::prelude::*;
let stdin = std::io::stdin();
for line in stdin.lock().lines() {
println!("{}", decode(line.unwrap()));
}
}
fn decode(data: String) -> String |
fn decode_pdf217(data: String) -> String {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let base_time = Utc.ymd(1000, 1, 1);
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF217".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
println!("1");
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
println!("2");
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
println!("3");
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn decode_code39(data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active duty for training or while traveling to or from a place of duty. Event condition.".to_string(),
('1', '2') => "Discharged due to misconduct involving family member abuse. (Sponsors who are eligible for retirement.) Segment condition.".to_string(),
('1', '3') => "Granted retired pay. Event condition.".to_string(),
('1', '4') => "DoD sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '5') => "DoD non-sponsored in U.S. (foreign military). Segment condition.".to_string(),
('1', '6') => "DoD sponsored overseas. Segment condition.".to_string(),
('1', '7') => "Deserter. Segment condition.".to_string(),
('1', '8') => "Discharged due to misconduct involving family member abuse. (Sponsors who are not eligible for retirement.) Segment condition.".to_string(),
('1', '9') => "Reservist who dies after receiving their 20 year letter. Event condition.".to_string(),
('2', '0') => "Transitional assistance (TA-30). Segment condition.".to_string(),
('2', '1') => "Transitional assistance (TA-Res). Segment condition.".to_string(),
('2', '2') => "Transitional assistance (TA-60). Segment condition.".to_string(),
('2', '3') => "Transitional assistance (TA-120). Segment condition.".to_string(),
('2', '4') => "Transitional assistance (SSB program). Segment condition.".to_string(),
('2', '5') => "Transitional assistance (VSI program). Segment condition.".to_string(),
('2', '6') => "Transitional assistance (composite). Segment condition.".to_string(),
('2', '7') => "Senior Executive Service (SES).".to_string(),
('2', '8') => "Emergency Essential - overseas only.".to_string(),
('2', '9') => "Emergency Essential - CONUS.".to_string(),
('3', '0') => "Emergency Essential - CONUS in living quarters, living on base, and not drawing a basic allowance for quarters, serving in an emergency essential capacity.".to_string(),
('3', '1') => "Reserve Component TA-120 Reserve Component Transition Assistance TA 120 (Jan 1, 2002 or later).".to_string(),
('3', '2') => "On MSC owned and operated vessels Deployed to foreign countries on Military Sealift Command owned and operated vessels. Segment condition.".to_string(),
('3', '3') => "Guard/Reserve Alert Notification Period.".to_string(),
('3', '4') | ('3', '5') => "Reserve Component TA-180 - 180 days TAMPS for reserve return from named contingencies.".to_string(),
('3', '6') | ('3', '7') => "TA-180 - 180 days TAMP for involuntary separation.".to_string(),
('3', '8') => "Living in Government Quarters in Guam or Puerto Rico, Living on base and not drawing an allowance for quarters in Guam or Puerto Rico.".to_string(),
('3', '9') => "Reserve Component TA-180 - TAMP - Mobilized for Contingency.".to_string(),
('4', '0') => "TA-180 TAMP - SPD Code Separation.".to_string(),
('4', '1') => "TA-180 - TAMP - Stop/Loss Separation.".to_string(),
('4', '2') => "DoD Non-Sponsored Overseas - Foreign Military personnel serving OCONUS not sponsored by DoD.".to_string(),
_ => format!("Unknown Type {}{}", pect.0, pect.1),
}
}
| {
match data.len() {
18 => return decode_code39(data),
88 | 89 => return decode_pdf217(data),
_ => return format!("Incorrect barcode length: {}. Make sure to include all spaces.", data.len()),
}
} | identifier_body |
source_manager.go | package gps
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/Masterminds/semver"
)
// Used to compute a friendly filepath from a URL-shaped input
//
// TODO(sdboyer) this is awful. Right?
var sanitizer = strings.NewReplacer(":", "-", "/", "-", "+", "-")
// A SourceManager is responsible for retrieving, managing, and interrogating
// source repositories. Its primary purpose is to serve the needs of a Solver,
// but it is handy for other purposes, as well.
//
// gps's built-in SourceManager, SourceMgr, is intended to be generic and
// sufficient for any purpose. It provides some additional semantics around the
// methods defined here.
type SourceManager interface {
// SourceExists checks if a repository exists, either upstream or in the
// SourceManager's central repository cache.
SourceExists(ProjectIdentifier) (bool, error)
// SyncSourceFor will attempt to bring all local information about a source
// fully up to date.
SyncSourceFor(ProjectIdentifier) error
// ListVersions retrieves a list of the available versions for a given
// repository name.
ListVersions(ProjectIdentifier) ([]Version, error)
// RevisionPresentIn indicates whether the provided Version is present in
// the given repository.
RevisionPresentIn(ProjectIdentifier, Revision) (bool, error)
// ListPackages parses the tree of the Go packages at or below root of the
// provided ProjectIdentifier, at the provided version.
ListPackages(ProjectIdentifier, Version) (PackageTree, error)
// GetManifestAndLock returns manifest and lock information for the provided
// root import path.
//
// gps currently requires that projects be rooted at their repository root,
// necessitating that the ProjectIdentifier's ProjectRoot must also be a
// repository root.
GetManifestAndLock(ProjectIdentifier, Version) (Manifest, Lock, error)
// ExportProject writes out the tree of the provided import path, at the
// provided version, to the provided directory.
ExportProject(ProjectIdentifier, Version, string) error
// AnalyzerInfo reports the name and version of the logic used to service
// GetManifestAndLock().
AnalyzerInfo() (name string, version *semver.Version)
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
DeduceProjectRoot(ip string) (ProjectRoot, error)
}
// A ProjectAnalyzer is responsible for analyzing a given path for Manifest and
// Lock information. Tools relying on gps must implement one.
type ProjectAnalyzer interface {
// Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
// | // gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
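// Illustrative usage sketch, not part of the original file: from a consuming
// tool's perspective the SourceMgr is typically created once near startup and
// released when done. `myAnalyzer` and the cache path are hypothetical.
//
//	sm, err := gps.NewSourceManager(myAnalyzer{}, "/home/me/.gps-cache")
//	if err != nil {
//		// e.g. a CouldNotCreateLockError if another process holds sm.lock
//		return err
//	}
//	defer sm.Release()
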
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
}
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return nil, err
}
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
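// Illustrative sketch, not part of the original file: for repository-hosted
// paths the deduced root is the repository root, so a hypothetical subpackage
// import path collapses to the repo path (error handling elided):
//
//	root, _ := sm.DeduceProjectRoot("github.com/Masterminds/semver/subpkg")
//	// root == ProjectRoot("github.com/Masterminds/semver")
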
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
}
// we don't care about the ident here, and the future produced by
// deducePathAndProcess will dedupe with what's in the sm.srcs map
src, _, err = ft.srcf()
return src, err
}
func (sm *SourceMgr) deducePathAndProcess(path string) (*unifiedFuture, error) {
// Check for an already-existing future in the map first
sm.srcfmut.RLock()
ft, exists := sm.srcfuts[path]
sm.srcfmut.RUnlock()
if exists {
return ft, nil
}
// Don't have one - set one up.
df, err := sm.deduceFromPath(path)
if err != nil {
return nil, err
}
sm.srcfmut.Lock()
defer sm.srcfmut.Unlock()
// A bad interleaving could allow two goroutines to make it here for the
// same path, so we have to re-check existence.
if ft, exists = sm.srcfuts[path]; exists {
return ft, nil
}
ft = &unifiedFuture{
rc: make(chan struct{}, 1),
sc: make(chan struct{}, 1),
}
// Rewrap the rootfinding func in another future
var pr string
var rooterr error
// Kick off the func to get root and register it into the rootxt.
rootf := func() {
defer close(ft.rc)
pr, rooterr = df.root()
if rooterr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have to
// expose any kind of controls for clearing caches.
return
}
tpr := ProjectRoot(pr)
sm.rootxt.Insert(pr, tpr)
// It's not harmful if the netname was a URL rather than an
// import path
if pr != path {
// Insert the result into the rootxt twice - once at the
// root itself, so as to catch siblings/relatives, and again
// at the exact provided import path (assuming they were
// different), so that on subsequent calls, exact matches
// can skip the regex above.
sm.rootxt.Insert(path, tpr)
}
}
// If deduction tells us this is slow, do it async in its own goroutine;
// otherwise, we can do it here and give the scheduler a bit of a break.
if df.rslow {
go rootf()
} else {
rootf()
}
// Store a closure bound to the future result on the futTracker.
ft.rootf = func() (string, error) {
<-ft.rc
return pr, rooterr
}
// Root future is handled, now build up the source future.
//
// First, complete the partialSourceFuture with information the sm has about
// our cachedir and analyzer
fut := df.psf(sm.cachedir, sm.an)
// The maybeSource-trying process is always slow, so keep it async here.
var src source
var ident string
var srcerr error
go func() {
defer close(ft.sc)
src, ident, srcerr = fut()
if srcerr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have
// to expose any kind of controls for clearing caches.
return
}
sm.srcmut.Lock()
defer sm.srcmut.Unlock()
// Check to make sure a source hasn't shown up in the meantime, or that
// there wasn't already one at the ident.
var hasi, hasp bool
var srci, srcp source
if ident != "" {
srci, hasi = sm.srcs[ident]
}
srcp, hasp = sm.srcs[path]
// if neither the ident nor the input path have an entry for this src,
// we're in the simple case - write them both in and we're done
if !hasi && !hasp {
sm.srcs[path] = src
if ident != path && ident != "" {
sm.srcs[ident] = src
}
return
}
// Now, the xors.
//
// If already present for ident but not for path, copy ident's src
// to path. This covers cases like a gopkg.in path referring back
// onto a github repository, where something else already explicitly
// looked up that same gh repo.
if hasi && !hasp {
sm.srcs[path] = srci
src = srci
}
// If already present for path but not for ident, do NOT copy path's
// src to ident, but use the returned one instead. Really, this case
// shouldn't occur at all...? But the crucial thing is that the
// path-based one has already discovered what actual ident of source
// they want to use, and changing that arbitrarily would have
// undefined effects.
if hasp && !hasi && ident != "" {
sm.srcs[ident] = src
}
// If both are present, then assume we're good, and use the path one
if hasp && hasi {
// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
// same object, panic
src = srcp
}
}()
ft.srcf = func() (source, string, error) {
<-ft.sc
return src, ident, srcerr
}
sm.srcfuts[path] = ft
return ft, nil
} | random_line_split |
|
source_manager.go | package gps
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/Masterminds/semver"
)
// Used to compute a friendly filepath from a URL-shaped input
//
// TODO(sdboyer) this is awful. Right?
var sanitizer = strings.NewReplacer(":", "-", "/", "-", "+", "-")
// A SourceManager is responsible for retrieving, managing, and interrogating
// source repositories. Its primary purpose is to serve the needs of a Solver,
// but it is handy for other purposes, as well.
//
// gps's built-in SourceManager, SourceMgr, is intended to be generic and
// sufficient for any purpose. It provides some additional semantics around the
// methods defined here.
type SourceManager interface {
// SourceExists checks if a repository exists, either upstream or in the
// SourceManager's central repository cache.
SourceExists(ProjectIdentifier) (bool, error)
// SyncSourceFor will attempt to bring all local information about a source
// fully up to date.
SyncSourceFor(ProjectIdentifier) error
// ListVersions retrieves a list of the available versions for a given
// repository name.
ListVersions(ProjectIdentifier) ([]Version, error)
// RevisionPresentIn indicates whether the provided Version is present in
// the given repository.
RevisionPresentIn(ProjectIdentifier, Revision) (bool, error)
// ListPackages parses the tree of the Go packages at or below root of the
// provided ProjectIdentifier, at the provided version.
ListPackages(ProjectIdentifier, Version) (PackageTree, error)
// GetManifestAndLock returns manifest and lock information for the provided
// root import path.
//
// gps currently requires that projects be rooted at their repository root,
// necessitating that the ProjectIdentifier's ProjectRoot must also be a
// repository root.
GetManifestAndLock(ProjectIdentifier, Version) (Manifest, Lock, error)
// ExportProject writes out the tree of the provided import path, at the
// provided version, to the provided directory.
ExportProject(ProjectIdentifier, Version, string) error
// AnalyzerInfo reports the name and version of the logic used to service
// GetManifestAndLock().
AnalyzerInfo() (name string, version *semver.Version)
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
DeduceProjectRoot(ip string) (ProjectRoot, error)
}
// A ProjectAnalyzer is responsible for analyzing a given path for Manifest and
// Lock information. Tools relying on gps must implement one.
type ProjectAnalyzer interface {
// Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
//
// gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
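// A usage sketch of the pattern described above: create the SourceManager as
// early as possible and release its lock on exit. naiveAnalyzer is the
// illustrative analyzer defined earlier, and the cache path is arbitrary.
func exampleSourceManagerUsage() error {
	sm, err := NewSourceManager(naiveAnalyzer{}, filepath.Join(os.TempDir(), "gps-cache"))
	if err != nil {
		return err
	}
	defer sm.Release()
	// Hand sm to a solver, or query it directly, e.g. deduce a project root.
	_, err = sm.DeduceProjectRoot("github.com/Masterminds/semver")
	return err
}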
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
}
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
func (sm *SourceMgr) | (id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return nil, err
}
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
}
// we don't care about the ident here, and the future produced by
// deducePathAndProcess will dedupe with what's in the sm.srcs map
src, _, err = ft.srcf()
return src, err
}
func (sm *SourceMgr) deducePathAndProcess(path string) (*unifiedFuture, error) {
// Check for an already-existing future in the map first
sm.srcfmut.RLock()
ft, exists := sm.srcfuts[path]
sm.srcfmut.RUnlock()
if exists {
return ft, nil
}
// Don't have one - set one up.
df, err := sm.deduceFromPath(path)
if err != nil {
return nil, err
}
sm.srcfmut.Lock()
defer sm.srcfmut.Unlock()
// A bad interleaving could allow two goroutines to make it here for the
// same path, so we have to re-check existence.
if ft, exists = sm.srcfuts[path]; exists {
return ft, nil
}
ft = &unifiedFuture{
rc: make(chan struct{}, 1),
sc: make(chan struct{}, 1),
}
// Rewrap the rootfinding func in another future
var pr string
var rooterr error
// Kick off the func to get root and register it into the rootxt.
rootf := func() {
defer close(ft.rc)
pr, rooterr = df.root()
if rooterr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have to
// expose any kind of controls for clearing caches.
return
}
tpr := ProjectRoot(pr)
sm.rootxt.Insert(pr, tpr)
// It's not harmful if the netname was a URL rather than an
// import path
if pr != path {
// Insert the result into the rootxt twice - once at the
// root itself, so as to catch siblings/relatives, and again
// at the exact provided import path (assuming they were
// different), so that on subsequent calls, exact matches
// can skip the regex above.
sm.rootxt.Insert(path, tpr)
}
}
// If deduction tells us this is slow, do it async in its own goroutine;
// otherwise, we can do it here and give the scheduler a bit of a break.
if df.rslow {
go rootf()
} else {
rootf()
}
// Store a closure bound to the future result on the futTracker.
ft.rootf = func() (string, error) {
<-ft.rc
return pr, rooterr
}
// Root future is handled, now build up the source future.
//
// First, complete the partialSourceFuture with information the sm has about
// our cachedir and analyzer
fut := df.psf(sm.cachedir, sm.an)
// The maybeSource-trying process is always slow, so keep it async here.
var src source
var ident string
var srcerr error
go func() {
defer close(ft.sc)
src, ident, srcerr = fut()
if srcerr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have
// to expose any kind of controls for clearing caches.
return
}
sm.srcmut.Lock()
defer sm.srcmut.Unlock()
// Check to make sure a source hasn't shown up in the meantime, or that
// there wasn't already one at the ident.
var hasi, hasp bool
var srci, srcp source
if ident != "" {
srci, hasi = sm.srcs[ident]
}
srcp, hasp = sm.srcs[path]
// if neither the ident nor the input path have an entry for this src,
// we're in the simple case - write them both in and we're done
if !hasi && !hasp {
sm.srcs[path] = src
if ident != path && ident != "" {
sm.srcs[ident] = src
}
return
}
// Now, the xors.
//
// If already present for ident but not for path, copy ident's src
// to path. This covers cases like a gopkg.in path referring back
// onto a github repository, where something else already explicitly
// looked up that same gh repo.
if hasi && !hasp {
sm.srcs[path] = srci
src = srci
}
// If already present for path but not for ident, do NOT copy path's
// src to ident, but use the returned one instead. Really, this case
// shouldn't occur at all...? But the crucial thing is that the
// path-based one has already discovered what actual ident of source
// they want to use, and changing that arbitrarily would have
// undefined effects.
if hasp && !hasi && ident != "" {
sm.srcs[ident] = src
}
// If both are present, then assume we're good, and use the path one
if hasp && hasi {
// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
// same object, panic
src = srcp
}
}()
ft.srcf = func() (source, string, error) {
<-ft.sc
return src, ident, srcerr
}
sm.srcfuts[path] = ft
return ft, nil
}
| ListVersions | identifier_name |
source_manager.go | package gps
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/Masterminds/semver"
)
// Used to compute a friendly filepath from a URL-shaped input
//
// TODO(sdboyer) this is awful. Right?
var sanitizer = strings.NewReplacer(":", "-", "/", "-", "+", "-")
// A SourceManager is responsible for retrieving, managing, and interrogating
// source repositories. Its primary purpose is to serve the needs of a Solver,
// but it is handy for other purposes, as well.
//
// gps's built-in SourceManager, SourceMgr, is intended to be generic and
// sufficient for any purpose. It provides some additional semantics around the
// methods defined here.
type SourceManager interface {
// SourceExists checks if a repository exists, either upstream or in the
// SourceManager's central repository cache.
SourceExists(ProjectIdentifier) (bool, error)
// SyncSourceFor will attempt to bring all local information about a source
// fully up to date.
SyncSourceFor(ProjectIdentifier) error
// ListVersions retrieves a list of the available versions for a given
// repository name.
ListVersions(ProjectIdentifier) ([]Version, error)
// RevisionPresentIn indicates whether the provided Version is present in
// the given repository.
RevisionPresentIn(ProjectIdentifier, Revision) (bool, error)
// ListPackages parses the tree of the Go packages at or below root of the
// provided ProjectIdentifier, at the provided version.
ListPackages(ProjectIdentifier, Version) (PackageTree, error)
// GetManifestAndLock returns manifest and lock information for the provided
// root import path.
//
// gps currently requires that projects be rooted at their repository root,
// necessitating that the ProjectIdentifier's ProjectRoot must also be a
// repository root.
GetManifestAndLock(ProjectIdentifier, Version) (Manifest, Lock, error)
// ExportProject writes out the tree of the provided import path, at the
// provided version, to the provided directory.
ExportProject(ProjectIdentifier, Version, string) error
// AnalyzerInfo reports the name and version of the logic used to service
// GetManifestAndLock().
AnalyzerInfo() (name string, version *semver.Version)
	// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
DeduceProjectRoot(ip string) (ProjectRoot, error)
}
// A ProjectAnalyzer is responsible for analyzing a given path for Manifest and
// Lock information. Tools relying on gps must implement one.
type ProjectAnalyzer interface {
// Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
//
// gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
}
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil |
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
}
// we don't care about the ident here, and the future produced by
// deducePathAndProcess will dedupe with what's in the sm.srcs map
src, _, err = ft.srcf()
return src, err
}
func (sm *SourceMgr) deducePathAndProcess(path string) (*unifiedFuture, error) {
// Check for an already-existing future in the map first
sm.srcfmut.RLock()
ft, exists := sm.srcfuts[path]
sm.srcfmut.RUnlock()
if exists {
return ft, nil
}
// Don't have one - set one up.
df, err := sm.deduceFromPath(path)
if err != nil {
return nil, err
}
sm.srcfmut.Lock()
defer sm.srcfmut.Unlock()
// A bad interleaving could allow two goroutines to make it here for the
// same path, so we have to re-check existence.
if ft, exists = sm.srcfuts[path]; exists {
return ft, nil
}
ft = &unifiedFuture{
rc: make(chan struct{}, 1),
sc: make(chan struct{}, 1),
}
// Rewrap the rootfinding func in another future
var pr string
var rooterr error
// Kick off the func to get root and register it into the rootxt.
rootf := func() {
defer close(ft.rc)
pr, rooterr = df.root()
if rooterr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have to
// expose any kind of controls for clearing caches.
return
}
tpr := ProjectRoot(pr)
sm.rootxt.Insert(pr, tpr)
// It's not harmful if the netname was a URL rather than an
// import path
if pr != path {
// Insert the result into the rootxt twice - once at the
// root itself, so as to catch siblings/relatives, and again
// at the exact provided import path (assuming they were
// different), so that on subsequent calls, exact matches
// can skip the regex above.
sm.rootxt.Insert(path, tpr)
}
}
// If deduction tells us this is slow, do it async in its own goroutine;
// otherwise, we can do it here and give the scheduler a bit of a break.
if df.rslow {
go rootf()
} else {
rootf()
}
// Store a closure bound to the future result on the futTracker.
ft.rootf = func() (string, error) {
<-ft.rc
return pr, rooterr
}
// Root future is handled, now build up the source future.
//
// First, complete the partialSourceFuture with information the sm has about
// our cachedir and analyzer
fut := df.psf(sm.cachedir, sm.an)
// The maybeSource-trying process is always slow, so keep it async here.
var src source
var ident string
var srcerr error
go func() {
defer close(ft.sc)
src, ident, srcerr = fut()
if srcerr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have
// to expose any kind of controls for clearing caches.
return
}
sm.srcmut.Lock()
defer sm.srcmut.Unlock()
// Check to make sure a source hasn't shown up in the meantime, or that
// there wasn't already one at the ident.
var hasi, hasp bool
var srci, srcp source
if ident != "" {
srci, hasi = sm.srcs[ident]
}
srcp, hasp = sm.srcs[path]
// if neither the ident nor the input path have an entry for this src,
// we're in the simple case - write them both in and we're done
if !hasi && !hasp {
sm.srcs[path] = src
if ident != path && ident != "" {
sm.srcs[ident] = src
}
return
}
// Now, the xors.
//
// If already present for ident but not for path, copy ident's src
// to path. This covers cases like a gopkg.in path referring back
// onto a github repository, where something else already explicitly
// looked up that same gh repo.
if hasi && !hasp {
sm.srcs[path] = srci
src = srci
}
// If already present for path but not for ident, do NOT copy path's
// src to ident, but use the returned one instead. Really, this case
// shouldn't occur at all...? But the crucial thing is that the
// path-based one has already discovered what actual ident of source
// they want to use, and changing that arbitrarily would have
// undefined effects.
if hasp && !hasi && ident != "" {
sm.srcs[ident] = src
}
// If both are present, then assume we're good, and use the path one
if hasp && hasi {
// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
// same object, panic
src = srcp
}
}()
ft.srcf = func() (source, string, error) {
<-ft.sc
return src, ident, srcerr
}
sm.srcfuts[path] = ft
return ft, nil
}
| {
// TODO(sdboyer) More-er proper-er errors
return nil, err
} | conditional_block |
source_manager.go | package gps
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/Masterminds/semver"
)
// Used to compute a friendly filepath from a URL-shaped input
//
// TODO(sdboyer) this is awful. Right?
var sanitizer = strings.NewReplacer(":", "-", "/", "-", "+", "-")
// A SourceManager is responsible for retrieving, managing, and interrogating
// source repositories. Its primary purpose is to serve the needs of a Solver,
// but it is handy for other purposes, as well.
//
// gps's built-in SourceManager, SourceMgr, is intended to be generic and
// sufficient for any purpose. It provides some additional semantics around the
// methods defined here.
type SourceManager interface {
// SourceExists checks if a repository exists, either upstream or in the
// SourceManager's central repository cache.
SourceExists(ProjectIdentifier) (bool, error)
// SyncSourceFor will attempt to bring all local information about a source
// fully up to date.
SyncSourceFor(ProjectIdentifier) error
// ListVersions retrieves a list of the available versions for a given
// repository name.
ListVersions(ProjectIdentifier) ([]Version, error)
// RevisionPresentIn indicates whether the provided Version is present in
// the given repository.
RevisionPresentIn(ProjectIdentifier, Revision) (bool, error)
// ListPackages parses the tree of the Go packages at or below root of the
// provided ProjectIdentifier, at the provided version.
ListPackages(ProjectIdentifier, Version) (PackageTree, error)
// GetManifestAndLock returns manifest and lock information for the provided
// root import path.
//
// gps currently requires that projects be rooted at their repository root,
// necessitating that the ProjectIdentifier's ProjectRoot must also be a
// repository root.
GetManifestAndLock(ProjectIdentifier, Version) (Manifest, Lock, error)
// ExportProject writes out the tree of the provided import path, at the
// provided version, to the provided directory.
ExportProject(ProjectIdentifier, Version, string) error
// AnalyzerInfo reports the name and version of the logic used to service
// GetManifestAndLock().
AnalyzerInfo() (name string, version *semver.Version)
	// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
DeduceProjectRoot(ip string) (ProjectRoot, error)
}
// A ProjectAnalyzer is responsible for analyzing a given path for Manifest and
// Lock information. Tools relying on gps must implement one.
type ProjectAnalyzer interface {
// Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
//
// gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) |
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return nil, err
}
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
}
// we don't care about the ident here, and the future produced by
// deducePathAndProcess will dedupe with what's in the sm.srcs map
src, _, err = ft.srcf()
return src, err
}
func (sm *SourceMgr) deducePathAndProcess(path string) (*unifiedFuture, error) {
// Check for an already-existing future in the map first
sm.srcfmut.RLock()
ft, exists := sm.srcfuts[path]
sm.srcfmut.RUnlock()
if exists {
return ft, nil
}
// Don't have one - set one up.
df, err := sm.deduceFromPath(path)
if err != nil {
return nil, err
}
sm.srcfmut.Lock()
defer sm.srcfmut.Unlock()
// A bad interleaving could allow two goroutines to make it here for the
// same path, so we have to re-check existence.
if ft, exists = sm.srcfuts[path]; exists {
return ft, nil
}
ft = &unifiedFuture{
rc: make(chan struct{}, 1),
sc: make(chan struct{}, 1),
}
// Rewrap the rootfinding func in another future
var pr string
var rooterr error
// Kick off the func to get root and register it into the rootxt.
rootf := func() {
defer close(ft.rc)
pr, rooterr = df.root()
if rooterr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have to
// expose any kind of controls for clearing caches.
return
}
tpr := ProjectRoot(pr)
sm.rootxt.Insert(pr, tpr)
// It's not harmful if the netname was a URL rather than an
// import path
if pr != path {
// Insert the result into the rootxt twice - once at the
// root itself, so as to catch siblings/relatives, and again
// at the exact provided import path (assuming they were
// different), so that on subsequent calls, exact matches
// can skip the regex above.
sm.rootxt.Insert(path, tpr)
}
}
// If deduction tells us this is slow, do it async in its own goroutine;
// otherwise, we can do it here and give the scheduler a bit of a break.
if df.rslow {
go rootf()
} else {
rootf()
}
// Store a closure bound to the future result on the futTracker.
ft.rootf = func() (string, error) {
<-ft.rc
return pr, rooterr
}
// Root future is handled, now build up the source future.
//
// First, complete the partialSourceFuture with information the sm has about
// our cachedir and analyzer
fut := df.psf(sm.cachedir, sm.an)
// The maybeSource-trying process is always slow, so keep it async here.
var src source
var ident string
var srcerr error
go func() {
defer close(ft.sc)
src, ident, srcerr = fut()
if srcerr != nil {
// Don't cache errs. This doesn't really hurt the solver, and is
// beneficial for other use cases because it means we don't have
// to expose any kind of controls for clearing caches.
return
}
sm.srcmut.Lock()
defer sm.srcmut.Unlock()
// Check to make sure a source hasn't shown up in the meantime, or that
// there wasn't already one at the ident.
var hasi, hasp bool
var srci, srcp source
if ident != "" {
srci, hasi = sm.srcs[ident]
}
srcp, hasp = sm.srcs[path]
// if neither the ident nor the input path have an entry for this src,
// we're in the simple case - write them both in and we're done
if !hasi && !hasp {
sm.srcs[path] = src
if ident != path && ident != "" {
sm.srcs[ident] = src
}
return
}
// Now, the xors.
//
// If already present for ident but not for path, copy ident's src
// to path. This covers cases like a gopkg.in path referring back
// onto a github repository, where something else already explicitly
// looked up that same gh repo.
if hasi && !hasp {
sm.srcs[path] = srci
src = srci
}
// If already present for path but not for ident, do NOT copy path's
// src to ident, but use the returned one instead. Really, this case
// shouldn't occur at all...? But the crucial thing is that the
// path-based one has already discovered what actual ident of source
// they want to use, and changing that arbitrarily would have
// undefined effects.
if hasp && !hasi && ident != "" {
sm.srcs[ident] = src
}
// If both are present, then assume we're good, and use the path one
if hasp && hasi {
// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
// same object, panic
src = srcp
}
}()
ft.srcf = func() (source, string, error) {
<-ft.sc
return src, ident, srcerr
}
sm.srcfuts[path] = ft
return ft, nil
}
| {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
} | identifier_body |
detailed-titanic-analysis-and-solution.py | #!/usr/bin/env python
# coding: utf-8
# Titanic is one of the classical problems in machine learning. There are many solutions with different approaches out there, so here is my take on this problem. I tried to explain every step in as much detail as I could, so if you're new to ML, this notebook may be helpful for you.
#
# My solution scored 0.79425. If you have noticed any mistakes or if you have any suggestions, you are more than welcome to leave a comment down below.
#
# With that being said, let's start with importing libraries that we'll need and take a peek at the data:
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
# In[ ]:
filePath = "../input/train.csv"
train = pd.read_csv(filePath)
filePath = "../input/test.csv"
test = pd.read_csv(filePath)
# In[ ]:
train.head()
# At first glance we can already tell that some data is missing.
#
# First, let's see how much data do we actually miss:
# In[ ]:
plt.figure(figsize=(14, 12))
# don't forget to set titles
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# As we can see, in both the test and train datasets we are missing quite a lot of values. Some data like Age and Embarked may be filled in, but the Cabin column is missing so many values that it can't really be used as a feature. It can be transformed or substituted, but we will do that later.
#
# Now let's focus on the data in detail and see if there are any noticeable correlations.
# # Initial data exploration
# The first thing we need to explore is how survivability depends on different factors, such as Sex, Age (younger people are more fit), Passenger Class (possibly higher class priority), and the number of siblings/spouses.
#
# Let's explore how survivability depends on these features and whether there are any correlations between them.
# In[ ]:
plt.figure(figsize=(14, 12))
plt.subplot(321)
sns.countplot('Survived', data=train)
plt.subplot(322)
sns.countplot('Sex', data=train, hue='Survived')
plt.subplot(323)
sns.distplot(train['Age'].dropna(), bins=25)
plt.subplot(324)
sns.countplot('Pclass', data=train, hue='Survived')
plt.subplot(325)
sns.countplot('SibSp', data=train)
plt.subplot(326)
sns.countplot('Parch', data=train)
# From these plots we can make several conclusions:
#
# * most people didn't survive the crash.
# * most passengers were males
# * survivability of women was much higher than that of men. We will have to explore the Sex feature more later and see if there are any other interesting correlations.
# * most passengers were middle aged, but there were also quite a few children aboard
# * most passengers had third class tickets
# * survivability of first and second class passengers was higher compared to the third class
# * most passengers traveled alone or with one sibling/spouse
#
# Now we can take a look at each feature specifically to see if it depends on something else or if there ...
# # Filling in the missing data
# Okay, we could jump into full exploration and maybe even transformation of the data, but as we saw before, we are missing quite a lot of data. The easiest approach would be simply dropping all the missing values, but in this case we risk losing accuracy of our models or entire features.
#
# Instead, we will try to fill the missing values based on some logic. Let's take a look at the training data once again to see which values we are missing:
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.isnull(), yticklabels=False)
# In its current state the train data is missing Age, Cabin, and Embarked values. Unfortunately, the Cabin column is missing most of its data and we can't really use it as a feature. However, it is not entirely useless; I'll come back to it later.
#
# The Age column can be filled in many ways. For example, we could take a look at the mean age of every passenger class and fill it based on that information. But instead, if we take a look at the names of the passengers, we can notice some information that can help us:
# In[ ]:
train.head()
# Every name has a title (such as Mr., Miss., etc.) and follows the pattern: Last_Name, Title. First_Name. We can categorise passengers by their titles and set unknown age values to the mean value of the corresponding title.
#
# We will do so by adding a column called 'Title' to the data and filling it in with a new function.
# In[ ]:
def get_title(pasngr_name):
|
# In[ ]:
train['Title'] = train['Name'].apply(get_title)
test['Title'] = test['Name'].apply(get_title)
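# For reference, one possible implementation of get_title, following the Last_Name, Title. First_Name pattern described above (the exact parsing logic here is an assumption):
# In[ ]:
# Sketch: take the first word after the comma, which is the title with its
# trailing period, e.g. "Braund, Mr. Owen Harris" -> "Mr."
def get_title(pasngr_name):
    return pasngr_name.split(',')[1].split()[0]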
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot('Title', 'Age', data=train)
# Now that we have all the titles, we can find the mean value for each of them and use it to fill the gaps in the data.
# In[ ]:
train.Title.unique()
# In[ ]:
age_by_title = train.groupby('Title')['Age'].mean()
print(age_by_title)
# In[ ]:
def fill_missing_ages(cols):
age = cols[0]
titles = cols[1]
if pd.isnull(age):
return age_by_title[titles]
else:
return age
# In[ ]:
train['Age'] = train[['Age', 'Title']].apply(fill_missing_ages, axis=1)
test['Age'] = test[['Age', 'Title']].apply(fill_missing_ages, axis=1)
#and one Fare value in the test set
test['Fare'].fillna(test['Fare'].mean(), inplace = True)
plt.figure(figsize=(14, 12))
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# Okay, now we have the Age column filled entirely. There are still missing values in the Cabin and Embarked columns. Unfortunately, we are missing so much data in Cabin that it would be impossible to fill it as we did with Age, but we are not going to get rid of it for now; it will be useful for us later.
#
# In the Embarked column only one value is missing, so we can set it to the most common value.
# In[ ]:
sns.countplot('Embarked', data=train)
# In[ ]:
train['Embarked'].fillna('S', inplace=True)
sns.heatmap(train.isnull(), yticklabels=False)
# Now we have patched the missing data and can explore the features and correlations between them without worrying that we may miss something.
# # Detailed exploration
# In this section we will try to explore every possible feature and the correlations between them. Also, ...
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# Here's a shortened plan that we will follow to evaluate each feature and ...:
# * Age
# * Sex
# * Passenger classes and Fares
# * **(...)**
# ### Age
# The first feature that comes to my mind is Age. The theory is simple: survivability depends on the age of a passenger. Older passengers have less chance to survive, younger passengers are more fit, and children are either not fit enough to survive, or have higher chances since adults help them.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Survived', 'Age', data=train)
# We can already notice that children had a better chance to survive, and the majority of casualties were middle aged passengers (which can be explained by the fact that most of the passengers were middle aged).
#
# Let's explore age again, but this time separated by the Sex column.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Sex', 'Age', data=train, hue='Survived', split=True)
# The plot above confirms our theory for young boys, but it is rather the opposite for young girls: most females under the age of 16 didn't survive. This looks weird at first glance, but maybe it is connected with some other feature.
#
# Let's see if the class had an influence on the survivability of females.
# In[ ]:
grid = sns.FacetGrid(train, col='Pclass', hue="Survived", size=4)
grid = grid.map(sns.swarmplot, 'Sex', 'Age', order=["female"])
# ### Pclass
# The idea here is pretty straightforward too: the higher the class, the better the chance to survive. First, let's take a look at the overall situation:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('Pclass', data=train, hue='Survived')
# We can already see that the class plays a big role in survivability. Most third class passengers didn't survive the crash, second class had a 50/50 chance, and most first class passengers survived.
#
# Let's further explore Pclass and try to find any correlations with other features.
#
# If we go back to the correlation heatmap, we will notice that Age and Fare are strongly correlated with Pclass, so they will be our main suspects.
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('Pclass', 'Fare', data=train)
plt.subplot(122)
sns.barplot('Pclass', 'Age', data=train)
# As expected, these two features are indeed connected with the class. The Fare was expected: the higher the class, the more expensive it is.
#
# Age can be explained by the fact that usually older people are wealthier than the younger ones. **(...)**
#
# Here's the overall picture of Fares depending on Ages separated by Classes:
# In[ ]:
sns.lmplot('Age', 'Fare', data=train, hue='Pclass', fit_reg=False, size=7)
# ### Family size
#
# This feature will represent the family size of a passenger. We have information about the number of siblings/spouses (SibSp) and parent/children relationships (Parch). Although it might not be full information about families, we can use it to determine the family size of each passenger by summing these two features.
# In[ ]:
train["FamilySize"] = train["SibSp"] + train["Parch"]
test["FamilySize"] = test["SibSp"] + test["Parch"]
train.head()
# Now let's see how family size affected survivability of passengers:
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('FamilySize', 'Survived', data=train)
plt.subplot(122)
sns.countplot('FamilySize', data=train, hue='Survived')
# We can notice a curious trend with family size: **(...)**
# In[ ]:
grid = sns.FacetGrid(train, col='Sex', size=6)
grid = grid.map(sns.barplot, 'FamilySize', 'Survived')
# These two plots only confirm our theory. With a family size of more than 3, survivability drops severely for both women and men. We should also keep in mind while looking at the plots above that women had overall better chances to survive than men.
#
# Let's just check if this trend depends on something else, like Pclass, for example:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('FamilySize', data=train, hue='Pclass')
# ### Embarked
#
# In[ ]:
sns.countplot('Embarked', data=train, hue='Survived')
# In[ ]:
sns.countplot('Embarked', data=train, hue='Pclass')
# ### Conclusion:
# # Additional features
# Now we've analyzed the data and have an idea of what will be relevant. But before we start building our model, there is one thing we can do to improve it even further.
#
# So far we've worked with features that came with the dataset, but we can also create our own custom features (so far we have FamilySize as a custom, or engineered feature).
# ### Cabin
# Now this is a tricky part. Cabin could be a really important feature, especially if we knew the distribution of cabins on the ship, but we are missing so much data that there is almost no practical value in the feature itself. However, there is one trick we can do with it.
#
# Let's create a new feature called CabinKnown that represents if a cabin of a certain passenger is known or not. Our theory here is that if the cabin is known, then probably that passenger survived.
# In[ ]:
def has_cabin(pasngr_cabin):
if pd.isnull(pasngr_cabin):
return 0
else:
return 1
train['CabinKnown'] = train['Cabin'].apply(has_cabin)
test['CabinKnown'] = test['Cabin'].apply(has_cabin)
sns.countplot('CabinKnown', data=train, hue='Survived')
# Clearly, the correlation here is strong: the survival ratio of passengers whose cabin is known is about 2:1, while the situation when the cabin is unknown is the opposite. This would be a very useful feature to have.
#
# But there is one problem with this feature. In real life, we wouldn't know in advance whether a cabin would be known or not (we can't know an outcome before the event has happened). That's why this feature is rather "artificial". Sure, it can improve the score of our model for this competition, but using it is kinda cheating.
#
# **(decide what u wanna do with that feature and finish the description)**
# ### Age categories
#
# ** * (explain why categories) * **
#
# Let's start with Age. The most logical way is to divide age into age categories: young, adult, and elder. Let's say that passengers aged 16 and younger are children, those older than 50 are elders, and everyone else is an adult.
# In[ ]:
def get_age_categories(age):
if(age <= 16):
return 'child'
elif(age > 16 and age <= 50):
return 'adult'
else:
return 'elder'
train['AgeCategory'] = train['Age'].apply(get_age_categories)
test['AgeCategory'] = test['Age'].apply(get_age_categories)
# In[ ]:
sns.countplot('AgeCategory', data=train, hue='Survived')
# ** (...) **
# ### Family size category
#
# Now let's do the same for family size: we will separate it into TraveledAlone, WithFamily, and WithLargeFamily (bigger than 3, where the survivability rate changes the most).
# In[ ]:
def get_family_category(family_size):
if(family_size > 3):
return 'WithLargeFamily'
elif(family_size > 0 and family_size<= 3):
return 'WithFamily'
else:
return 'TraveledAlone'
train['FamilyCategory'] = train['FamilySize'].apply(get_family_category)
test['FamilyCategory'] = test['FamilySize'].apply(get_family_category)
# ** (needs a description depending on whether it will be included or not) **
# ### Title category
# In[ ]:
print(train.Title.unique())
# In[ ]:
plt.figure(figsize=(12, 10))
sns.countplot('Title', data=train)
# In[ ]:
titles_to_cats = {
'HighClass': ['Lady.', 'Sir.'],
'MiddleClass': ['Mr.', 'Mrs.'],
'LowClass': []
}
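# The mapping above can be completed and inspected as shown below; the grouping of the remaining titles into 'LowClass' is an assumption, included only to illustrate the idea:
# In[ ]:
# Sketch: assign each title to a category, defaulting to 'LowClass' for titles
# not listed explicitly, and look at the resulting distribution.
def get_title_category(title):
    for category, titles in titles_to_cats.items():
        if title in titles:
            return category
    return 'LowClass'

print(train['Title'].apply(get_title_category).value_counts())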
# ### Fare scaling
#
# If we take a look at the Fare distribution, we will see that it is scattered a lot:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.distplot(train['Fare'])
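# One common way to compress such a skewed distribution is a log transform; it is shown here only as an illustration and is not applied to the data used by the model below:
# In[ ]:
# Illustration: log(1 + x) compresses the long Fare tail while preserving the
# ordering of values.
plt.figure(figsize=(10, 8))
sns.distplot(np.log1p(train['Fare']))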
# # Creating the model:
# Now that we have all the data we need, we can start building the model.
#
# First of all, we need to prepare the data for the actual model. Classification algorithms work only with numbers or True/False values. For example, the model can't tell the difference in Sex at the moment because we have text in that field. What we can do is transform the values of this feature into True or False (IsMale = True for males and IsMale = False for females).
#
# For this purpose we will use two methods: transforming data into numerical values, and dummy variables.
#
# Let's start with Sex and transformation:
# In[ ]:
train['Sex'] = train['Sex'].astype('category').cat.codes
test['Sex'] = test['Sex'].astype('category').cat.codes
train[['Name', 'Sex']].head()
# As we see, the Sex column is now binary and takes 1 for males and 0 for females. Now classifiers will be able to work with it.
#
# Now we will transform Embarked column, but with a different method:
# In[ ]:
embarkedCat = pd.get_dummies(train['Embarked'])
train = pd.concat([train, embarkedCat], axis=1)
train.drop('Embarked', axis=1, inplace=True)
embarkedCat = pd.get_dummies(test['Embarked'])
test = pd.concat([test, embarkedCat], axis=1)
test.drop('Embarked', axis=1, inplace=True)
train[['Q', 'S', 'C']].head()
# We used dummies, which replaced the Embarked column with three new columns corresponding to the values in the old column. Let's do the same for the family size and age categories:
# In[ ]:
# for the train set
familyCat = pd.get_dummies(train['FamilyCategory'])
train = pd.concat([train, familyCat], axis=1)
train.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(train['AgeCategory'])
train = pd.concat([train, ageCat], axis=1)
train.drop('AgeCategory', axis=1, inplace=True)
#and for the test
familyCat = pd.get_dummies(test['FamilyCategory'])
test = pd.concat([test, familyCat], axis=1)
test.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(test['AgeCategory'])
test = pd.concat([test, ageCat], axis=1)
test.drop('AgeCategory', axis=1, inplace=True)
# In[ ]:
plt.figure(figsize=(14,12))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# # Modelling
# Now we need to select a classification algorithm for the model. There are plenty of decent classifiers, but which is the best for this task and which one should we choose?
#
# *Here's the idea:* we will take a bunch of classifiers, test them on the data, and choose the best one.
#
# In order to do that, we will create a list of different classifiers and see how each of them performs on the training data. To select the best one, we will evaluate them using cross-validation and compare their accuracy scores (percentage of the right answers). I decided to use Random Forest, KNN, SVC, Decision Tree, AdaBoost, Gradient Boost, Extremely Randomized Trees, and Logistic Regression.
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
classifiers = [
RandomForestClassifier(),
KNeighborsClassifier(),
SVC(),
DecisionTreeClassifier(),
AdaBoostClassifier(),
GradientBoostingClassifier(),
ExtraTreesClassifier(),
LogisticRegression()
]
# Now we need to select the features that will be used in the model and drop everything else. Also, the training data has to be split in two parts: *X_train* is the data the classifiers will be trained on, and *y_train* are the answers.
# In[ ]:
X_train = train.drop(['PassengerId', 'Survived', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
y_train = train['Survived']
X_final = test.drop(['PassengerId', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
# We will use K-Folds as cross-validation. It splits the data into "folds", ** (...) **
# In[ ]:
from sklearn.model_selection import KFold
# n_splits=5
cv_kfold = KFold(n_splits=10)
# Now we evaluate each of the classifiers from the list using K-Folds. The accuracy scores will be stored in a list.
#
# The problem is that K-Folds evaluates each algorithm several times. As a result, we will have a list of arrays with scores for each classifier, which is not great for comparison.
#
# To fix it, we will create another list of means of scores for each classifier. That way it will be much easier to compare the algorithms and select the best one.
# In[ ]:
from sklearn.model_selection import cross_val_score
class_scores = []
for classifier in classifiers:
class_scores.append(cross_val_score(classifier, X_train, y_train, scoring='accuracy', cv=cv_kfold))
class_mean_scores = []
for score in class_scores:
class_mean_scores.append(score.mean())
# Now that we have the mean accuracy scores, we need to compare them somehow. But since it's just a list of numbers, we can easily plot them. First, let's create a data frame of classifiers names and their scores, and then plot it:
# In[ ]:
scores_df = pd.DataFrame({
'Classifier':['Random Forest', 'KNeighbors', 'SVC', 'DecisionTreeClassifier', 'AdaBoostClassifier',
'GradientBoostingClassifier', 'ExtraTreesClassifier', 'LogisticRegression'],
'Scores': class_mean_scores
})
print(scores_df)
sns.factorplot('Scores', 'Classifier', data=scores_df, size=6)
# The two best classifiers turned out to be Gradient Boost and Logistic Regression. Since Logistic Regression got a slightly lower score and is rather easily overfitted, we will use Gradient Boost.
# ### Selecting the parameters
# Now that we've chosen the algorithm, we need to select the best parameters for it. There are many options, and sometimes it's almost impossible to know the best set of parameters. That's why we will use Grid Search to test out different options and choose the best ones.
#
# But first let's take a look at all the possible parameters of Gradient Boosting classifier:
# In[ ]:
g_boost = GradientBoostingClassifier()
g_boost.get_params().keys()
# We will test different options for min_samples_leaf, min_samples_split, max_depth, and loss parameters. I will set n_estimators to 100, but it can be increased since Gradient Boosting algorithms generally don't tend to overfit.
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = {
'loss': ['deviance', 'exponential'],
'min_samples_leaf': [2, 5, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100],
'max_depth': [3, 5, 10, 20]
}
grid_cv = GridSearchCV(g_boost, param_grid, scoring='accuracy', cv=cv_kfold)
grid_cv.fit(X_train, y_train)
grid_cv.best_estimator_
# In[ ]:
print(grid_cv.best_score_)
print(grid_cv.best_params_)
# Now that we have the best parameters we could find, it's time to create and train the model on the training data.
# In[ ]:
g_boost = GradientBoostingClassifier(min_samples_split=5, loss='deviance', n_estimators=1000,
max_depth=3, min_samples_leaf=2)
# In[ ]:
g_boost.fit(X_train, y_train)
# In[ ]:
feature_values = pd.DataFrame({
'Feature': X_final.columns,
'Importance': g_boost.feature_importances_
})
print(feature_values)
sns.factorplot('Importance', 'Feature', data=feature_values, size=6)
# ### Prediction on the testing set and output
# Now our model is ready, and we can make a prediction on the testing set and create a .csv output for submission.
# In[ ]:
prediction = g_boost.predict(X_final)
# In[ ]:
submission = pd.DataFrame({
'PassengerId': test['PassengerId'],
'Survived': prediction
})
# In[ ]:
#submission.to_csv('submission.csv', index=False)
| index_1 = pasngr_name.find(', ') + 2
index_2 = pasngr_name.find('. ') + 1
return pasngr_name[index_1:index_2] | identifier_body |
detailed-titanic-analysis-and-solution.py | #!/usr/bin/env python
# coding: utf-8
# Titanic is one of the classical problems in machine learning. There are many solutions with different approaches out there, so here is my take on this problem. I tried to explain every step in as much detail as I could, so if you're new to ML, this notebook may be helpful for you.
#
# My solution scored 0.79425. If you have noticed any mistakes or if you have any suggestions, you are more than welcome to leave a comment down below.
#
# With that being said, let's start with importing libraries that we'll need and take a peek at the data:
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
# In[ ]:
filePath = "../input/train.csv"
train = pd.read_csv(filePath)
filePath = "../input/test.csv"
test = pd.read_csv(filePath)
# In[ ]:
train.head()
# At first glance we can already tell that some data is missing.
#
# First, let's see how much data we actually miss:
# In[ ]:
plt.figure(figsize=(14, 12))
# don't forget to set titles
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# As we can see, in both the test and train datasets we are missing quite a lot of values. Some data like Age and Embarked may be filled in, but the Cabin column is missing so many values that it can't really be used as a feature. It can be transformed or substituted, but we will do that later.
#
# Now lets focus on the data in details and see if there are any noticeable correlations.
# # Initial data exploration
# The first thing we need to explore is how survivability depends on different factors, such as Sex, Age (younger people are more fit), Passenger Class (possible higher class priority), and Number of Spouses/Siblings.
#
# Let's explore how survivability depends on these features and whether there are any correlations between them.
# In[ ]:
plt.figure(figsize=(14, 12))
plt.subplot(321)
sns.countplot('Survived', data=train)
plt.subplot(322)
sns.countplot('Sex', data=train, hue='Survived')
plt.subplot(323)
sns.distplot(train['Age'].dropna(), bins=25)
plt.subplot(324)
sns.countplot('Pclass', data=train, hue='Survived')
plt.subplot(325)
sns.countplot('SibSp', data=train)
plt.subplot(326)
sns.countplot('Parch', data=train)
# From these plots we can make several conclusions:
#
# * most people didn't survive the crash.
# * most passengers were males
# * survivability of women was much higher than that of men. We will have to explore the Sex feature more later and see if there are any other interesting correlations.
# * most passengers were middle aged, but there were also quite a few children aboard
# * most passengers had third class tickets
# * survivability of first and second class passengers was higher compared to the third class
# * most passengers traveled alone or with one sibling/spouse
#
# Now we can take a look at each feature specifically to see if it depends on something else or if there ...
# # Filling in the missing data
# Okay, we could jump into full exploration and maybe even transformation of the data, but as we saw before, we miss quite a lot of data. The easiest approach would be to simply drop all the missing values, but in this case we risk losing accuracy of our models or even entire features.
#
# Instead, we will try to fill the missing values based on some logic. Let's take a look at the training data once again to see which values we miss.
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.isnull(), yticklabels=False)
# In its current state the train data is missing Age, Cabin, and Embarked values. Unfortunately, the Cabin column is missing most of its data and we can't really use it as a feature. However, it is not entirely useless; I'll come back to it later.
#
# The Age column can be filled in many ways. For example, we could take a look at the mean age of every passenger class and fill it based on that information. But instead, if we take a look at the names of the passengers, we can notice information that can help us:
# In[ ]:
train.head()
# Every name has a title (such as Mr., Miss., etc.) and follows the pattern: Last_Name, Title. First_Name. We can categorise passengers by their titles and set unknown age values to the mean value of the corresponding title.
#
# We will do so by adding a column called 'Title' to the data and filling it out with a new function.
# In[ ]:
def get_title(pasngr_name):
index_1 = pasngr_name.find(', ') + 2
index_2 = pasngr_name.find('. ') + 1
return pasngr_name[index_1:index_2]
# In[ ]:
train['Title'] = train['Name'].apply(get_title)
test['Title'] = test['Name'].apply(get_title)
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot('Title', 'Age', data=train)
# Now that we have all the titles, we can find out a mean value for each of them and use it to fill the gaps in the data.
# In[ ]:
train.Title.unique()
# In[ ]:
age_by_title = train.groupby('Title')['Age'].mean()
print(age_by_title)
# In[ ]:
def fill_missing_ages(cols):
age = cols[0]
titles = cols[1]
if pd.isnull(age):
return age_by_title[titles]
else:
return age
# In[ ]:
train['Age'] = train[['Age', 'Title']].apply(fill_missing_ages, axis=1)
test['Age'] = test[['Age', 'Title']].apply(fill_missing_ages, axis=1)
#and one Fare value in the test set
test['Fare'].fillna(test['Fare'].mean(), inplace = True)
plt.figure(figsize=(14, 12))
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# Okay, now we have the Age column filled entirely. There are still missing values in the Cabin and Embarked columns. Unfortunately, we miss so much data in Cabin that it would be impossible to fill it as we did with Age, but we are not going to get rid of it for now; it will be useful for us later.
#
# In the Embarked column only one value is missing, so we can set it to the most common value.
# In[ ]:
sns.countplot('Embarked', data=train)
# In[ ]:
train['Embarked'].fillna('S', inplace=True)
sns.heatmap(train.isnull(), yticklabels=False)
# Now we have patched the missing data and can explore the features and correlations between them without worrying that we may miss something.
# # Detailed exploration
# In this section we will try to explore every possible feature and the correlations between them. Also, ...
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# Here's a shortened plan that we will follow to evaluate each feature and ...:
# * Age
# * Sex
# * Passenger classes and Fares
# * **(...)**
# ### Age
# The first feature that comes to my mind is Age. The theory is simple: survivability depends on the age of a passenger. Older passengers have less chance to survive, younger passengers are more fit, and children are either not fit enough to survive or have higher chances because adults help them.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Survived', 'Age', data=train)
# We can already notice that children had a better chance to survive, and the majority of casualties were middle aged passengers (which can be explained by the fact that most of the passengers were middle aged).
#
# Let's explore the age, but this time separated by the Sex column.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Sex', 'Age', data=train, hue='Survived', split=True)
# The plot above confirms our theory for young boys, but it is rather the opposite for young girls: most females under the age of 16 didn't survive. This looks weird at first glance, but maybe it is connected with some other feature.
#
# Let's see if the class had influence on survivability of females.
# In[ ]:
grid = sns.FacetGrid(train, col='Pclass', hue="Survived", size=4)
grid = grid.map(sns.swarmplot, 'Sex', 'Age', order=["female"])
# ### Pclass
# The idea here is pretty straightforward too: the higher the class, the better the chance to survive. First, let's take a look at the overall situation:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('Pclass', data=train, hue='Survived')
# We can already see that the class plays a big role in survivability. Most of third class passengers didn't survive the crash, second class had 50/50 chance, and most of first class passengers survived.
#
# Let's further explore Pclass and try to find any correlations with other features.
#
# If we go back to the correlation heatmap, we will notice that Age and Fare are strongly correlated with Pclass, so they will be our main suspects.
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('Pclass', 'Fare', data=train)
plt.subplot(122)
sns.barplot('Pclass', 'Age', data=train)
# As expected, these two features are indeed connected with the class. The Fare result was rather expected: the higher the class, the more expensive it is.
#
# Age can be explained by the fact that usually older people are wealthier than the younger ones. **(...)**
#
# Here's the overall picture of Fares depending on Ages separated by Classes:
# In[ ]:
sns.lmplot('Age', 'Fare', data=train, hue='Pclass', fit_reg=False, size=7)
# ### Family size
#
# This feature will represent the family size of a passenger. We have information about number of Siblings/Spouses (SibSp) and Parent/Children relationships (Parch). Although it might not be full information about families, we can use it to determine a family size of each passenger by summing these two features.
# In[ ]:
train["FamilySize"] = train["SibSp"] + train["Parch"]
test["FamilySize"] = test["SibSp"] + test["Parch"]
train.head()
# Now let's see how family size affected survivability of passengers:
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('FamilySize', 'Survived', data=train)
plt.subplot(122)
sns.countplot('FamilySize', data=train, hue='Survived')
# We can notice a curious trend with family size: **(...)**
# In[ ]:
grid = sns.FacetGrid(train, col='Sex', size=6)
grid = grid.map(sns.barplot, 'FamilySize', 'Survived')
# These two plots only confirm our theory. With family size more than 3 survivability drops severely for both women and men. We also should keep in mind while looking at the plots above that women had overall better chances to survive than men.
#
# Let's just check if this trend depends on something else, like Pclass, for example:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('FamilySize', data=train, hue='Pclass')
# ### Embarked
#
# In[ ]:
sns.countplot('Embarked', data=train, hue='Survived')
# In[ ]:
sns.countplot('Embarked', data=train, hue='Pclass')
# ### Conclusion:
# # Additional features
# Now we've analyzed the data and have an idea of what will be relevant. But before we start building our model, there is one thing we can do to improve it even further.
#
# So far we've worked with features that came with the dataset, but we can also create our own custom features (so far we have FamilySize as a custom, or engineered feature).
# ### Cabin
# Now this is a tricky part. Cabin could be a really important feature, especially if we knew the distribution of cabins on the ship, but we miss so much data that there is almost no practical value in the feature itself. However, there is one trick we can do with it.
#
# Let's create a new feature called CabinKnown that represents if a cabin of a certain passenger is known or not. Our theory here is that if the cabin is known, then probably that passenger survived.
# In[ ]:
def has_cabin(pasngr_cabin):
if pd.isnull(pasngr_cabin):
return 0
else:
return 1
train['CabinKnown'] = train['Cabin'].apply(has_cabin)
test['CabinKnown'] = test['Cabin'].apply(has_cabin)
sns.countplot('CabinKnown', data=train, hue='Survived')
# Clearly, the correlation here is strong: the survival ratio for passengers whose cabin is known is about 2:1, while for passengers with an unknown cabin it is the opposite. This would be a very useful feature to have.
#
# But there is one problem with this feature. In real life, we wouldn't know in advance whether a cabin would be known or not (we can't know an outcome before an event happened). That's why this feature is rather "artificial". Sure, it can improve the score of our model for this competition, but using it is kinda cheating.
#
# **(decide what u wanna do with that feature and finish the description)**
# ### Age categories
#
# ** * (explain why categories) * **
#
# Let's start with Age. The most logical way is to divide age into age categories: young, adult, and elder. Let's say that passengers aged 16 and younger are children, those older than 50 are elders, and everyone else is an adult.
# In[ ]:
def get_age_categories(age):
if(age <= 16):
return 'child'
elif(age > 16 and age <= 50):
return 'adult'
else:
return 'elder'
train['AgeCategory'] = train['Age'].apply(get_age_categories)
test['AgeCategory'] = test['Age'].apply(get_age_categories)
# In[ ]:
sns.countplot('AgeCategory', data=train, hue='Survived')
# ** (...) **
# ### Family size category
#
# Now let's do the same for the family size: we will separate it into TraveledAlone, WithFamily, and WithLargeFamily (bigger than 3, where the survivability rate changes the most).
# In[ ]:
def get_family_category(family_size):
if(family_size > 3):
return 'WithLargeFamily'
elif(family_size > 0 and family_size<= 3):
return 'WithFamily'
else:
return 'TraveledAlone'
train['FamilyCategory'] = train['FamilySize'].apply(get_family_category)
test['FamilyCategory'] = test['FamilySize'].apply(get_family_category)
# ** (needs a description depending on whether it will be included or not) **
# ### Title category
# In[ ]:
print(train.Title.unique())
# In[ ]:
plt.figure(figsize=(12, 10))
sns.countplot('Title', data=train)
# In[ ]:
titles_to_cats = {
'HighClass': ['Lady.', 'Sir.'],
'MiddleClass': ['Mr.', 'Mrs.'],
'LowClass': []
}
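# The dictionary above sketches how titles could be grouped into coarse classes, but it is never actually applied in this notebook. Purely as an illustration (and not used by the model below), here is one way such a mapping could be applied; treating every unlisted title as 'LowClass' is my own assumption.
# In[ ]:
# illustrative sketch only -- does not modify the data used later
def get_title_category(title):
    # return the first category whose title list contains the given title
    for category, titles in titles_to_cats.items():
        if title in titles:
            return category
    # assumed fallback for titles not listed in the dictionary
    return 'LowClass'
train['Title'].apply(get_title_category).value_counts()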
# ### Fare scaling
#
# If we take a look at the Fare distribution, we will see that it is scattered a lot:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.distplot(train['Fare'])
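# The notebook stops here without actually rescaling Fare. As a hedged sketch of what such scaling could look like, a log transform is a common way to compress the long right tail; np.log1p is used because some fares are 0. The result is kept in a separate variable so the data used by the model below is unchanged.
# In[ ]:
# illustrative sketch only -- not fed into the model
fare_log = np.log1p(train['Fare'])
plt.figure(figsize=(10, 8))
sns.distplot(fare_log)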
# # Creating the model:
# Now that we have all the data we need, we can start building the model.
#
# First of all, we need to prepare the data for the actual model. Classification algorithms work only with numbers or True/False values. For example, the model can't tell the difference in Sex at the moment because we have text in that field. What we can do is transform the values of this feature into True or False (IsMale = True for males and IsMale = False for females).
#
# For this purpose we will use two methods: transformation of data into numerical values and dummies.
#
# Let's start with Sex and transformation:
# In[ ]:
train['Sex'] = train['Sex'].astype('category').cat.codes
test['Sex'] = test['Sex'].astype('category').cat.codes
train[['Name', 'Sex']].head()
# As we see, the Sex column is now binary and takes 1 for males and 0 for females. Now classifiers will be able to work with it.
#
# Now we will transform Embarked column, but with a different method:
# In[ ]:
embarkedCat = pd.get_dummies(train['Embarked'])
train = pd.concat([train, embarkedCat], axis=1)
train.drop('Embarked', axis=1, inplace=True)
embarkedCat = pd.get_dummies(test['Embarked'])
test = pd.concat([test, embarkedCat], axis=1)
test.drop('Embarked', axis=1, inplace=True)
train[['Q', 'S', 'C']].head()
# We used dummies, which replaced the Embarked column with three new columns corresponding to the values in the old column. Let's do the same for family size and age categories:
# In[ ]:
# for the train set
familyCat = pd.get_dummies(train['FamilyCategory'])
train = pd.concat([train, familyCat], axis=1)
train.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(train['AgeCategory'])
train = pd.concat([train, ageCat], axis=1)
train.drop('AgeCategory', axis=1, inplace=True)
#and for the test
familyCat = pd.get_dummies(test['FamilyCategory'])
test = pd.concat([test, familyCat], axis=1)
test.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(test['AgeCategory'])
test = pd.concat([test, ageCat], axis=1)
test.drop('AgeCategory', axis=1, inplace=True)
# In[ ]:
plt.figure(figsize=(14,12))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# # Modelling
# Now we need to select a classification algorithm for the model. There are plenty of decent classifiers, but which is the best for this task and which one should we choose?
#
# *Here's the idea:* we will take a bunch of classifiers, test them on the data, and choose the best one.
#
# In order to do that, we will create a list of different classifiers and see how each of them performs on the training data. To select the best one, we will evaluate them using cross-validation and compare their accuracy scores (percentage of the right answers). I decided to use Random Forest, KNN, SVC, Decision Tree, AdaBoost, Gradient Boost, Extremely Randomized Trees, and Logistic Regression.
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
classifiers = [
RandomForestClassifier(),
KNeighborsClassifier(),
SVC(),
DecisionTreeClassifier(),
AdaBoostClassifier(),
GradientBoostingClassifier(),
ExtraTreesClassifier(),
LogisticRegression()
]
# Now we need to select the features that will be used in the model and drop everything else. Also, the training data has to be split in two parts: *X_train* is the data the classifiers will be trained on, and *y_train* are the answers.
# In[ ]:
X_train = train.drop(['PassengerId', 'Survived', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
y_train = train['Survived']
X_final = test.drop(['PassengerId', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
# We will use K-Folds as cross-validation. It splits the data into "folds", ** (...) **
# In[ ]:
from sklearn.model_selection import KFold
# n_splits=5
cv_kfold = KFold(n_splits=10)
# Now we evaluate each of the classifiers from the list using K-Folds. The accuracy scores will be stored in a list.
#
# The problem is that K-Folds evaluates each algorithm several times. As a result, we will have a list of arrays with scores for each classifier, which is not great for comparison.
#
# To fix it, we will create another list of means of scores for each classifier. That way it will be much easier to compare the algorithms and select the best one.
# In[ ]:
from sklearn.model_selection import cross_val_score
class_scores = []
for classifier in classifiers:
|
class_mean_scores = []
for score in class_scores:
class_mean_scores.append(score.mean())
# Now that we have the mean accuracy scores, we need to compare them somehow. But since it's just a list of numbers, we can easily plot them. First, let's create a data frame of classifiers names and their scores, and then plot it:
# In[ ]:
scores_df = pd.DataFrame({
'Classifier':['Random Forest', 'KNeighbors', 'SVC', 'DecisionTreeClassifier', 'AdaBoostClassifier',
'GradientBoostingClassifier', 'ExtraTreesClassifier', 'LogisticRegression'],
'Scores': class_mean_scores
})
print(scores_df)
sns.factorplot('Scores', 'Classifier', data=scores_df, size=6)
# The two best classifiers turned out to be Gradient Boost and Logistic Regression. Since Logistic Regression got a slightly lower score and is rather easily overfitted, we will use Gradient Boost.
# ### Selecting the parameters
# Now that we've chosen the algorithm, we need to select the best parameters for it. There are many options, and sometimes it's almost impossible to know the best set of parameters. That's why we will use Grid Search to test out different options and choose the best ones.
#
# But first let's take a look at all the possible parameters of Gradient Boosting classifier:
# In[ ]:
g_boost = GradientBoostingClassifier()
g_boost.get_params().keys()
# We will test different options for min_samples_leaf, min_samples_split, max_depth, and loss parameters. I will set n_estimators to 100, but it can be increased since Gradient Boosting algorithms generally don't tend to overfit.
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = {
'loss': ['deviance', 'exponential'],
'min_samples_leaf': [2, 5, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100],
'max_depth': [3, 5, 10, 20]
}
grid_cv = GridSearchCV(g_boost, param_grid, scoring='accuracy', cv=cv_kfold)
grid_cv.fit(X_train, y_train)
grid_cv.best_estimator_
# In[ ]:
print(grid_cv.best_score_)
print(grid_cv.best_params_)
# Now that we have the best parameters we could find, it's time to create and train the model on the training data.
# In[ ]:
g_boost = GradientBoostingClassifier(min_samples_split=5, loss='deviance', n_estimators=1000,
max_depth=3, min_samples_leaf=2)
# In[ ]:
g_boost.fit(X_train, y_train)
# In[ ]:
feature_values = pd.DataFrame({
'Feature': X_final.columns,
'Importance': g_boost.feature_importances_
})
print(feature_values)
sns.factorplot('Importance', 'Feature', data=feature_values, size=6)
# ### Prediction on the testing set and output
# Now our model is ready, and we can make a prediction on the testing set and create a .csv output for submission.
# In[ ]:
prediction = g_boost.predict(X_final)
# In[ ]:
submission = pd.DataFrame({
'PassengerId': test['PassengerId'],
'Survived': prediction
})
# In[ ]:
#submission.to_csv('submission.csv', index=False)
| class_scores.append(cross_val_score(classifier, X_train, y_train, scoring='accuracy', cv=cv_kfold)) | conditional_block |
detailed-titanic-analysis-and-solution.py | #!/usr/bin/env python
# coding: utf-8
# Titanic is one of the classical problems in machine learning. There are many solutions with different approaches out there, so here is my take on this problem. I tried to explain every step in as much detail as I could, so if you're new to ML, this notebook may be helpful for you.
#
# My solution scored 0.79425. If you have noticed any mistakes or if you have any suggestions, you are more than welcome to leave a comment down below.
#
# With that being said, let's start with importing libraries that we'll need and take a peek at the data:
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
# In[ ]:
filePath = "../input/train.csv"
train = pd.read_csv(filePath)
filePath = "../input/test.csv"
test = pd.read_csv(filePath)
# In[ ]:
train.head()
# At first glance we can already tell that some data is missing.
#
# First, let's see how much data we actually miss:
# In[ ]:
plt.figure(figsize=(14, 12))
# don't forget to set titles
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# As we can see, in both the test and train datasets we are missing quite a lot of values. Some data like Age and Embarked may be filled in, but the Cabin column is missing so many values that it can't really be used as a feature. It can be transformed or substituted, but we will do that later.
#
# Now lets focus on the data in details and see if there are any noticeable correlations.
# # Initial data exploration
# The first thing we need to explore is how survivability depends on different factors, such as Sex, Age (younger people are more fit), Passenger Class (possible higher class priority), and Number of Spouses/Siblings.
#
# Let's explore how survivability depends on these features and whether there are any correlations between them.
# In[ ]:
plt.figure(figsize=(14, 12))
plt.subplot(321)
sns.countplot('Survived', data=train)
plt.subplot(322)
sns.countplot('Sex', data=train, hue='Survived')
plt.subplot(323)
sns.distplot(train['Age'].dropna(), bins=25)
plt.subplot(324)
sns.countplot('Pclass', data=train, hue='Survived')
plt.subplot(325)
sns.countplot('SibSp', data=train)
plt.subplot(326)
sns.countplot('Parch', data=train)
# From these plots we can make several conclusions:
#
# * most people didn't survive the crash.
# * most passengers were males
# * survivability of women was much higher than that of men. We will have to explore the Sex feature more later and see if there are any other interesting correlations.
# * most passengers were middle aged, but there were also quite a few children aboard
# * most passengers had third class tickets
# * survivability of first and second class passengers was higher compared to the third class
# * most passengers traveled alone or with one sibling/spouse
#
# Now we can take a look at each feature specifically to see if it depends on something else or if there ...
# # Filling in the missing data
# Okay, we could jump into full exploration and maybe even transformation of the data, but as we saw before, we miss quite a lot of data. The easiest approach would be to simply drop all the missing values, but in this case we risk losing accuracy of our models or even entire features.
#
# Instead, we will try to fill the missing values based on some logic. Let's take a look at the training data once again to see which values we miss.
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.isnull(), yticklabels=False)
# In its current state the train data is missing Age, Cabin, and Embarked values. Unfortunately, the Cabin column is missing most of its data and we can't really use it as a feature. However, it is not entirely useless; I'll come back to it later.
#
# The Age column can be filled in many ways. For example, we could take a look at the mean age of every passenger class and fill it based on that information. But instead, if we take a look at the names of the passengers, we can notice information that can help us:
# In[ ]:
train.head()
# Every name has a title (such as Mr., Miss., etc.) and follows the pattern: Last_Name, Title. First_Name. We can categorise passengers by their titles and set unknown age values to the mean value of the corresponding title.
#
# We will do so by adding a column called 'Title' to the data and filling it out with a new function.
# In[ ]:
def get_title(pasngr_name):
index_1 = pasngr_name.find(', ') + 2
index_2 = pasngr_name.find('. ') + 1
return pasngr_name[index_1:index_2]
# In[ ]:
train['Title'] = train['Name'].apply(get_title)
test['Title'] = test['Name'].apply(get_title)
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot('Title', 'Age', data=train)
# Now that we have all the titles, we can find out a mean value for each of them and use it to fill the gaps in the data.
# In[ ]:
train.Title.unique()
# In[ ]:
age_by_title = train.groupby('Title')['Age'].mean()
print(age_by_title)
# In[ ]:
def | (cols):
age = cols[0]
titles = cols[1]
if pd.isnull(age):
return age_by_title[titles]
else:
return age
# In[ ]:
train['Age'] = train[['Age', 'Title']].apply(fill_missing_ages, axis=1)
test['Age'] = test[['Age', 'Title']].apply(fill_missing_ages, axis=1)
#and one Fare value in the test set
test['Fare'].fillna(test['Fare'].mean(), inplace = True)
plt.figure(figsize=(14, 12))
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# Okay, now we have the Age column filled entirely. There are still missing values in the Cabin and Embarked columns. Unfortunately, we miss so much data in Cabin that it would be impossible to fill it as we did with Age, but we are not going to get rid of it for now; it will be useful for us later.
#
# In the Embarked column only one value is missing, so we can set it to the most common value.
# In[ ]:
sns.countplot('Embarked', data=train)
# In[ ]:
train['Embarked'].fillna('S', inplace=True)
sns.heatmap(train.isnull(), yticklabels=False)
# Now we have patched the missing data and can explore the features and correlations between them without worrying that we may miss something.
# # Detailed exploration
# In this section we will try to explore every possible feature and the correlations between them. Also, ...
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# Here's a shortened plan that we will follow to evaluate each feature and ...:
# * Age
# * Sex
# * Passenger classes and Fares
# * **(...)**
# ### Age
# The first feature that comes to my mind is Age. The theory is simple: survivability depends on the age of a passenger. Older passengers have less chance to survive, younger passengers are more fit, and children are either not fit enough to survive or have higher chances because adults help them.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Survived', 'Age', data=train)
# We can already notice that children had a better chance to survive, and the majority of casualties were middle aged passengers (which can be explained by the fact that most of the passengers were middle aged).
#
# Let's explore the age, but this time separated by the Sex column.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Sex', 'Age', data=train, hue='Survived', split=True)
# The plot above confirms our theory for young boys, but it is rather the opposite for young girls: most females under the age of 16 didn't survive. This looks weird at first glance, but maybe it is connected with some other feature.
#
# Let's see if the class had influence on survivability of females.
# In[ ]:
grid = sns.FacetGrid(train, col='Pclass', hue="Survived", size=4)
grid = grid.map(sns.swarmplot, 'Sex', 'Age', order=["female"])
# ### Pclass
# The idea here is pretty straightforward too: the higher the class, the better the chance to survive. First, let's take a look at the overall situation:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('Pclass', data=train, hue='Survived')
# We can already see that the class plays a big role in survivability. Most of third class passengers didn't survive the crash, second class had 50/50 chance, and most of first class passengers survived.
#
# Let's further explore Pclass and try to find any correlations with other features.
#
# If we go back to the correlation heatmap, we will notice that Age and Fare are strongly correlated with Pclass, so they will be our main suspects.
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('Pclass', 'Fare', data=train)
plt.subplot(122)
sns.barplot('Pclass', 'Age', data=train)
# As expected, these two features are indeed connected with the class. The Fare result was rather expected: the higher the class, the more expensive it is.
#
# Age can be explained by the fact that usually older people are wealthier than the younger ones. **(...)**
#
# Here's the overall picture of Fares depending on Ages separated by Classes:
# In[ ]:
sns.lmplot('Age', 'Fare', data=train, hue='Pclass', fit_reg=False, size=7)
# ### Family size
#
# This feature will represent the family size of a passenger. We have information about number of Siblings/Spouses (SibSp) and Parent/Children relationships (Parch). Although it might not be full information about families, we can use it to determine a family size of each passenger by summing these two features.
# In[ ]:
train["FamilySize"] = train["SibSp"] + train["Parch"]
test["FamilySize"] = test["SibSp"] + test["Parch"]
train.head()
# Now let's see how family size affected survivability of passengers:
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('FamilySize', 'Survived', data=train)
plt.subplot(122)
sns.countplot('FamilySize', data=train, hue='Survived')
# We can notice a curious trend with family size: **(...)**
# In[ ]:
grid = sns.FacetGrid(train, col='Sex', size=6)
grid = grid.map(sns.barplot, 'FamilySize', 'Survived')
# These two plots only confirm our theory. With family size more than 3 survivability drops severely for both women and men. We also should keep in mind while looking at the plots above that women had overall better chances to survive than men.
#
# Let's just check if this trend depends on something else, like Pclass, for example:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('FamilySize', data=train, hue='Pclass')
# ### Embarked
#
# In[ ]:
sns.countplot('Embarked', data=train, hue='Survived')
# In[ ]:
sns.countplot('Embarked', data=train, hue='Pclass')
# ### Conclusion:
# # Additional features
# Now we've analyzed the data and have an idea of what will be relevant. But before we start building our model, there is one thing we can do to improve it even further.
#
# So far we've worked with features that came with the dataset, but we can also create our own custom features (so far we have FamilySize as a custom, or engineered feature).
# ### Cabin
# Now this is a tricky part. Cabin could be a really important feature, especially if we knew the distribution of cabins on the ship, but we miss so much data that there is almost no practical value in the feature itself. However, there is one trick we can do with it.
#
# Let's create a new feature called CabinKnown that represents if a cabin of a certain passenger is known or not. Our theory here is that if the cabin is known, then probably that passenger survived.
# In[ ]:
def has_cabin(pasngr_cabin):
if pd.isnull(pasngr_cabin):
return 0
else:
return 1
train['CabinKnown'] = train['Cabin'].apply(has_cabin)
test['CabinKnown'] = test['Cabin'].apply(has_cabin)
sns.countplot('CabinKnown', data=train, hue='Survived')
# Clearly, the correlation here is strong: the survival ratio for passengers whose cabin is known is about 2:1, while for passengers with an unknown cabin it is the opposite. This would be a very useful feature to have.
#
# But there is one problem with this feature. In real life, we wouldn't know in advance whether a cabin would be known or not (we can't know an outcome before an event happened). That's why this feature is rather "artificial". Sure, it can improve the score of our model for this competition, but using it is kinda cheating.
#
# **(decide what u wanna do with that feature and finish the description)**
# ### Age categories
#
# ** * (explain why categories) * **
#
# Let's start with Age. The most logical way is to divide age into age categories: young, adult, and elder. Let's say that passengers aged 16 and younger are children, those older than 50 are elders, and everyone else is an adult.
# In[ ]:
def get_age_categories(age):
if(age <= 16):
return 'child'
elif(age > 16 and age <= 50):
return 'adult'
else:
return 'elder'
train['AgeCategory'] = train['Age'].apply(get_age_categories)
test['AgeCategory'] = test['Age'].apply(get_age_categories)
# In[ ]:
sns.countplot('AgeCategory', data=train, hue='Survived')
# ** (...) **
# ### Family size category
#
# Now let's do the same for the family size: we will separate it into TraveledAlone, WithFamily, and WithLargeFamily (bigger than 3, where the survivability rate changes the most).
# In[ ]:
def get_family_category(family_size):
if(family_size > 3):
return 'WithLargeFamily'
elif(family_size > 0 and family_size<= 3):
return 'WithFamily'
else:
return 'TraveledAlone'
train['FamilyCategory'] = train['FamilySize'].apply(get_family_category)
test['FamilyCategory'] = test['FamilySize'].apply(get_family_category)
# ** (needs a description depending on whether it will be included or not) **
# ### Title category
# In[ ]:
print(train.Title.unique())
# In[ ]:
plt.figure(figsize=(12, 10))
sns.countplot('Title', data=train)
# In[ ]:
titles_to_cats = {
'HighClass': ['Lady.', 'Sir.'],
'MiddleClass': ['Mr.', 'Mrs.'],
'LowClass': []
}
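# The dictionary above sketches how titles could be grouped into coarse classes, but it is never actually applied in this notebook. Purely as an illustration (and not used by the model below), here is one way such a mapping could be applied; treating every unlisted title as 'LowClass' is my own assumption.
# In[ ]:
# illustrative sketch only -- does not modify the data used later
def get_title_category(title):
    # return the first category whose title list contains the given title
    for category, titles in titles_to_cats.items():
        if title in titles:
            return category
    # assumed fallback for titles not listed in the dictionary
    return 'LowClass'
train['Title'].apply(get_title_category).value_counts()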
# ### Fare scaling
#
# If we take a look at the Fare distribution, we will see that it is scattered a lot:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.distplot(train['Fare'])
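# The notebook stops here without actually rescaling Fare. As a hedged sketch of what such scaling could look like, a log transform is a common way to compress the long right tail; np.log1p is used because some fares are 0. The result is kept in a separate variable so the data used by the model below is unchanged.
# In[ ]:
# illustrative sketch only -- not fed into the model
fare_log = np.log1p(train['Fare'])
plt.figure(figsize=(10, 8))
sns.distplot(fare_log)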
# # Creating the model:
# Now that we have all the data we need, we can start building the model.
#
# First of all, we need to prepare the data for the actual model. Classification algorithms work only with numbers or True/False values. For example, the model can't tell the difference in Sex at the moment because we have text in that field. What we can do is transform the values of this feature into True or False (IsMale = True for males and IsMale = False for females).
#
# For this purpose we will use two methods: transformation of data into numerical values and dummies.
#
# Let's start with Sex and transformation:
# In[ ]:
train['Sex'] = train['Sex'].astype('category').cat.codes
test['Sex'] = test['Sex'].astype('category').cat.codes
train[['Name', 'Sex']].head()
# As we see, the Sex column is now binary and takes 1 for males and 0 for females. Now classifiers will be able to work with it.
#
# Now we will transform Embarked column, but with a different method:
# In[ ]:
embarkedCat = pd.get_dummies(train['Embarked'])
train = pd.concat([train, embarkedCat], axis=1)
train.drop('Embarked', axis=1, inplace=True)
embarkedCat = pd.get_dummies(test['Embarked'])
test = pd.concat([test, embarkedCat], axis=1)
test.drop('Embarked', axis=1, inplace=True)
train[['Q', 'S', 'C']].head()
# We used dummies, which replaced the Embarked column with three new columns corresponding to the values in the old column. Let's do the same for family size and age categories:
# In[ ]:
# for the train set
familyCat = pd.get_dummies(train['FamilyCategory'])
train = pd.concat([train, familyCat], axis=1)
train.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(train['AgeCategory'])
train = pd.concat([train, ageCat], axis=1)
train.drop('AgeCategory', axis=1, inplace=True)
#and for the test
familyCat = pd.get_dummies(test['FamilyCategory'])
test = pd.concat([test, familyCat], axis=1)
test.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(test['AgeCategory'])
test = pd.concat([test, ageCat], axis=1)
test.drop('AgeCategory', axis=1, inplace=True)
# In[ ]:
plt.figure(figsize=(14,12))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# # Modelling
# Now we need to select a classification algorithm for the model. There are plenty of decent classifiers, but which is the best for this task and which one should we choose?
#
# *Here's the idea:* we will take a bunch of classifiers, test them on the data, and choose the best one.
#
# In order to do that, we will create a list of different classifiers and see how each of them performs on the training data. To select the best one, we will evaluate them using cross-validation and compare their accuracy scores (percentage of the right answers). I decided to use Random Forest, KNN, SVC, Decision Tree, AdaBoost, Gradient Boost, Extremely Randomized Trees, and Logistic Regression.
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
classifiers = [
RandomForestClassifier(),
KNeighborsClassifier(),
SVC(),
DecisionTreeClassifier(),
AdaBoostClassifier(),
GradientBoostingClassifier(),
ExtraTreesClassifier(),
LogisticRegression()
]
# Now we need to select the features that will be used in the model and drop everything else. Also, the training data has to be split in two parts: *X_train* is the data the classifiers will be trained on, and *y_train* are the answers.
# In[ ]:
X_train = train.drop(['PassengerId', 'Survived', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
y_train = train['Survived']
X_final = test.drop(['PassengerId', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
# We will use K-Folds as cross-validation. It splits the data into "folds", ** (...) **
# In[ ]:
from sklearn.model_selection import KFold
# n_splits=5
cv_kfold = KFold(n_splits=10)
# Now we evaluate each of the classifiers from the list using K-Folds. The accuracy scores will be stored in a list.
#
# The problem is that K-Folds evaluates each algorithm several times. As a result, we will have a list of arrays with scores for each classifier, which is not great for comparison.
#
# To fix it, we will create another list of means of scores for each classifier. That way it will be much easier to compare the algorithms and select the best one.
# In[ ]:
from sklearn.model_selection import cross_val_score
class_scores = []
for classifier in classifiers:
class_scores.append(cross_val_score(classifier, X_train, y_train, scoring='accuracy', cv=cv_kfold))
class_mean_scores = []
for score in class_scores:
class_mean_scores.append(score.mean())
# Now that we have the mean accuracy scores, we need to compare them somehow. But since it's just a list of numbers, we can easily plot them. First, let's create a data frame of classifiers names and their scores, and then plot it:
# In[ ]:
scores_df = pd.DataFrame({
'Classifier':['Random Forest', 'KNeighbors', 'SVC', 'DecisionTreeClassifier', 'AdaBoostClassifier',
'GradientBoostingClassifier', 'ExtraTreesClassifier', 'LogisticRegression'],
'Scores': class_mean_scores
})
print(scores_df)
sns.factorplot('Scores', 'Classifier', data=scores_df, size=6)
# The two best classifiers turned out to be Gradient Boost and Logistic Regression. Since Logistic Regression got a slightly lower score and is rather easily overfitted, we will use Gradient Boost.
# ### Selecting the parameters
# Now that we've chosen the algorithm, we need to select the best parameters for it. There are many options, and sometimes it's almost impossible to know the best set of parameters. That's why we will use Grid Search to test out different options and choose the best ones.
#
# But first let's take a look at all the possible parameters of Gradient Boosting classifier:
# In[ ]:
g_boost = GradientBoostingClassifier()
g_boost.get_params().keys()
# We will test different options for min_samples_leaf, min_samples_split, max_depth, and loss parameters. I will set n_estimators to 100, but it can be increased since Gradient Boosting algorithms generally don't tend to overfit.
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = {
'loss': ['deviance', 'exponential'],
'min_samples_leaf': [2, 5, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100],
'max_depth': [3, 5, 10, 20]
}
grid_cv = GridSearchCV(g_boost, param_grid, scoring='accuracy', cv=cv_kfold)
grid_cv.fit(X_train, y_train)
grid_cv.best_estimator_
# In[ ]:
print(grid_cv.best_score_)
print(grid_cv.best_params_)
# Now that we have the best parameters we could find, it's time to create and train the model on the training data.
# In[ ]:
g_boost = GradientBoostingClassifier(min_samples_split=5, loss='deviance', n_estimators=1000,
max_depth=3, min_samples_leaf=2)
# In[ ]:
g_boost.fit(X_train, y_train)
# In[ ]:
feature_values = pd.DataFrame({
'Feature': X_final.columns,
'Importance': g_boost.feature_importances_
})
print(feature_values)
sns.factorplot('Importance', 'Feature', data=feature_values, size=6)
# ### Prediction on the testing set and output
# Now our model is ready, and we can make a prediction on the testing set and create a .csv output for submission.
# In[ ]:
prediction = g_boost.predict(X_final)
# In[ ]:
submission = pd.DataFrame({
'PassengerId': test['PassengerId'],
'Survived': prediction
})
# In[ ]:
#submission.to_csv('submission.csv', index=False)
| fill_missing_ages | identifier_name |
detailed-titanic-analysis-and-solution.py | #!/usr/bin/env python
# coding: utf-8
# Titanic is one of the classical problems in machine learning. There are many solutions with different approaches out there, so here is my take on this problem. I tried to explain every step in as much detail as I could, so if you're new to ML, this notebook may be helpful for you.
#
# My solution scored 0.79425. If you have noticed any mistakes or if you have any suggestions, you are more than welcome to leave a comment down below.
#
# With that being said, let's start with importing libraries that we'll need and take a peek at the data:
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
# In[ ]:
filePath = "../input/train.csv"
train = pd.read_csv(filePath)
filePath = "../input/test.csv"
test = pd.read_csv(filePath)
# In[ ]:
train.head()
# At first glance we can already tell that some data is missing.
#
# First, let's see how much data we actually miss:
# In[ ]:
plt.figure(figsize=(14, 12))
# don't forget to set titles
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# As we can see, in both the test and train datasets we are missing quite a lot of values. Some data like Age and Embarked may be filled in, but the Cabin column is missing so many values that it can't really be used as a feature. It can be transformed or substituted, but we will do that later.
#
# Now lets focus on the data in details and see if there are any noticeable correlations.
# # Initial data exploration
# The first thing we need to explore is how survivability depends on different factors, such as Sex, Age (younger people are more fit), Passenger Class (possible higher class priority), and Number of Spouses/Siblings.
#
# Let's explore how survivability depends on these features and whether there are any correlations between them.
# In[ ]:
plt.figure(figsize=(14, 12))
plt.subplot(321)
sns.countplot('Survived', data=train)
plt.subplot(322)
sns.countplot('Sex', data=train, hue='Survived')
plt.subplot(323)
sns.distplot(train['Age'].dropna(), bins=25)
plt.subplot(324)
sns.countplot('Pclass', data=train, hue='Survived')
plt.subplot(325)
sns.countplot('SibSp', data=train)
plt.subplot(326)
sns.countplot('Parch', data=train)
# From these plots we can make several conclusions:
#
# * most people didn't survive the crash.
# * most passengers were males
# * survivability of women was much higher than that of men. We will have to explore the Sex feature more later and see if there are any other interesting correlations.
# * most passengers were middle aged, but there were also quite a few children aboard
# * most passengers had third class tickets
# * survivability of first and second class passengers was higher compared to the third class
# * most passengers traveled alone or with one sibling/spouse
#
# Now we can take a look at each feature specifically to see if it depends on something else or if there ...
# # Filling in the missing data
# Okay, we could jump into full exploration and maybe even transformation of the data, but as we saw before, we miss quite a lot of data. The easiest approach would be to simply drop all the missing values, but in this case we risk losing accuracy of our models or even entire features.
#
# Instead, we will try to fill the missing values based on some logic. Let's take a look at the training data once again to see which values we miss.
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.isnull(), yticklabels=False)
# In its current state the train data is missing Age, Cabin, and Embarked values. Unfortunately, the Cabin column is missing most of its data and we can't really use it as a feature. However, it is not entirely useless; I'll come back to it later.
#
# The Age column can be filled in many ways. For example, we could take a look at the mean age of every passenger class and fill it based on that information. But instead, if we take a look at the names of the passengers, we can notice information that can help us:
# In[ ]:
train.head()
# Every name has a title (such as Mr., Miss., etc.) and follows the pattern: Last_Name, Title. First_Name. We can categorise passengers by their titles and set unknown age values to the mean value of the corresponding title.
#
# We will do so by adding a column called 'Title' to the data and filling it out with a new function.
# In[ ]:
def get_title(pasngr_name):
index_1 = pasngr_name.find(', ') + 2
index_2 = pasngr_name.find('. ') + 1
return pasngr_name[index_1:index_2]
# In[ ]:
train['Title'] = train['Name'].apply(get_title)
test['Title'] = test['Name'].apply(get_title)
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot('Title', 'Age', data=train)
# Now that we have all the titles, we can find out a mean value for each of them and use it to fill the gaps in the data.
# In[ ]:
train.Title.unique()
# In[ ]:
age_by_title = train.groupby('Title')['Age'].mean()
print(age_by_title)
# In[ ]:
def fill_missing_ages(cols):
age = cols[0]
titles = cols[1]
if pd.isnull(age):
return age_by_title[titles]
else:
return age
# In[ ]:
train['Age'] = train[['Age', 'Title']].apply(fill_missing_ages, axis=1)
test['Age'] = test[['Age', 'Title']].apply(fill_missing_ages, axis=1)
#and one Fare value in the test set
test['Fare'].fillna(test['Fare'].mean(), inplace = True)
plt.figure(figsize=(14, 12))
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# Okay, now we have the Age column filled entirely. There are still missing values in the Cabin and Embarked columns. Unfortunately, we miss so much data in Cabin that it would be impossible to fill it as we did with Age, but we are not going to get rid of it for now; it will be useful for us later.
#
# In the Embarked column only one value is missing, so we can set it to the most common value.
# In[ ]:
sns.countplot('Embarked', data=train)
# In[ ]:
train['Embarked'].fillna('S', inplace=True)
sns.heatmap(train.isnull(), yticklabels=False)
# Now we have patched the missing data and can explore the features and correlations between them without worrying that we may miss something.
# # Detailed exploration
# In this section we will try to explore every possible feature and the correlations between them. Also, ...
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# Here's a shortened plan that we will follow to evaluate each feature and ...:
# * Age
# * Sex
# * Passenger classes and Fares
# * **(...)**
# ### Age
# The first feature that comes to my mind is Age. The theory is simple: survivability depends on the age of a passenger. Older passengers have less chance to survive, younger passengers are more fit, and children are either not fit enough to survive or have higher chances because adults help them.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Survived', 'Age', data=train)
# We can already notice that children had a better chance to survive, and the majority of casualties were middle aged passengers (which can be explained by the fact that most of the passengers were middle aged).
#
# Let's explore the age, but this time separated by the Sex column.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Sex', 'Age', data=train, hue='Survived', split=True)
# The plot above confirms our theory for young boys, but it is rather the opposite for young girls: most females under the age of 16 didn't survive. This looks weird at first glance, but maybe it is connected with some other feature.
#
# Let's see if the class had influence on survivability of females.
# In[ ]:
grid = sns.FacetGrid(train, col='Pclass', hue="Survived", size=4)
grid = grid.map(sns.swarmplot, 'Sex', 'Age', order=["female"])
# ### Pclass
# The idea here is pretty straightforward too: the higher the class, the better the chance to survive. First, let's take a look at the overall situation:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('Pclass', data=train, hue='Survived')
# We can already see that the class plays a big role in survivability. Most of third class passengers didn't survive the crash, second class had 50/50 chance, and most of first class passengers survived.
#
# Let's further explore Pclass and try to find any correlations with other features.
#
# If we go back to the correlation heatmap, we will notice that Age and Fare are strongly correlated with Pclass, so they will be our main suspects.
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('Pclass', 'Fare', data=train)
plt.subplot(122)
sns.barplot('Pclass', 'Age', data=train)
# As expected, these two features are indeed connected with the class. The Fare result was rather expected: the higher the class, the more expensive it is.
#
# Age can be explained by the fact that usually older people are wealthier than the younger ones. **(...)**
#
# Here's the overall picture of Fares depending on Ages separated by Classes:
# In[ ]:
sns.lmplot('Age', 'Fare', data=train, hue='Pclass', fit_reg=False, size=7)
# ### Family size
#
# This feature will represent the family size of a passenger. We have information about number of Siblings/Spouses (SibSp) and Parent/Children relationships (Parch). Although it might not be full information about families, we can use it to determine a family size of each passenger by summing these two features.
# In[ ]:
train["FamilySize"] = train["SibSp"] + train["Parch"]
test["FamilySize"] = test["SibSp"] + test["Parch"]
train.head()
# Now let's see how family size affected survivability of passengers:
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('FamilySize', 'Survived', data=train)
plt.subplot(122)
sns.countplot('FamilySize', data=train, hue='Survived')
# We can notice a curious trend with family size: **(...)**
# In[ ]:
grid = sns.FacetGrid(train, col='Sex', size=6)
grid = grid.map(sns.barplot, 'FamilySize', 'Survived')
# These two plots only confirm our theory. With a family size greater than 3, survivability drops severely for both women and men. While looking at the plots above, we should also keep in mind that women had overall better chances to survive than men.
#
# Let's just check if this trend depends on something else, like Pclass, for example:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('FamilySize', data=train, hue='Pclass')
# ### Embarked
#
# In[ ]:
sns.countplot('Embarked', data=train, hue='Survived')
# In[ ]:
sns.countplot('Embarked', data=train, hue='Pclass')
# ### Conclusion:
# # Additional features
# Now we've analyzed the data and have an idea of what will be relevant. But before we start building our model, there is one thing we can do to improve it even further.
#
# So far we've worked with features that came with the dataset, but we can also create our own custom features (so far we have FamilySize as a custom, or engineered feature).
# ### Cabin
# Now this is a tricky part. Cabin could be a really important feature, especially if we knew the distribution of cabins on the ship, but so much data is missing that there is almost no practical value in the feature itself. However, there is one trick we can do with it.
#
# Let's create a new feature called CabinKnown that represents if a cabin of a certain passenger is known or not. Our theory here is that if the cabin is known, then probably that passenger survived.
# In[ ]:
def has_cabin(pasngr_cabin):
if pd.isnull(pasngr_cabin):
return 0
else:
return 1
train['CabinKnown'] = train['Cabin'].apply(has_cabin)
test['CabinKnown'] = test['Cabin'].apply(has_cabin)
sns.countplot('CabinKnown', data=train, hue='Survived')
# Clearly, the correlation here is strong: the survival ratio for passengers whose cabin is known is about 2:1, while for passengers with an unknown cabin it is the opposite. This would be a very useful feature to have.
#
# But there is one problem with this feature. In real life, we wouldn't know in advance whether a cabin would be known or not (we can't know an outcome before the event has happened). That's why this feature is rather "artificial". Sure, it can improve the score of our model for this competition, but using it is arguably cheating.
#
# **(decide what u wanna do with that feature and finish the description)**
# ### Age categories
#
# ** * (explain why categories) * **
#
# Let's start with Age. The most logical way is to divide age into categories: young, adult, and elder. Let's say that passengers aged 16 and younger are children, those older than 50 are elders, and everyone else is an adult.
# In[ ]:
def get_age_categories(age):
if(age <= 16):
return 'child'
elif(age > 16 and age <= 50):
return 'adult'
else:
return 'elder'
train['AgeCategory'] = train['Age'].apply(get_age_categories)
test['AgeCategory'] = test['Age'].apply(get_age_categories)
# In[ ]:
sns.countplot('AgeCategory', data=train, hue='Survived')
# ** (...) **
# ### Family size category
#
# Now let's do the same for the family size: we will separate it into TraveledAlone, WithFamily, and WithLargeFamily (bigger than 3, where the survivability rate changes the most).
# In[ ]:
def get_family_category(family_size):
if(family_size > 3):
return 'WithLargeFamily'
elif(family_size > 0 and family_size<= 3):
return 'WithFamily'
else:
return 'TraveledAlone'
train['FamilyCategory'] = train['FamilySize'].apply(get_family_category)
test['FamilyCategory'] = test['FamilySize'].apply(get_family_category)
# ** (needs a description depending on whether it will be included or not) **
# ### Title category
# In[ ]:
print(train.Title.unique())
# In[ ]:
plt.figure(figsize=(12, 10))
sns.countplot('Title', data=train)
# In[ ]:
titles_to_cats = {
'HighClass': ['Lady.', 'Sir.'],
'MiddleClass': ['Mr.', 'Mrs.'],
'LowClass': []
}
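# The mapping above is left unfinished; purely as an illustrative sketch (the category lists are placeholders, not the final grouping, and nothing is written back to the data), it could be inverted and inspected like this:
# In[ ]:
# Sketch only: invert the title-to-category mapping and label unmapped titles as 'Other'.
title_to_cat = {title: cat for cat, titles in titles_to_cats.items() for title in titles}
title_category = train['Title'].map(title_to_cat).fillna('Other')
sns.countplot(title_category, hue=train['Survived'])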
# ### Fare scaling
#
# If we take a look at the Fare distribution, we will see that it is scattered a lot:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.distplot(train['Fare'])
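# One common option, shown here only as a sketch (it is not applied to the data used by the model below), is a log transform that compresses the long tail of the Fare distribution:
# In[ ]:
import numpy as np
# Sketch only: np.log1p computes log(1 + x), which handles zero fares safely.
sns.distplot(np.log1p(train['Fare']))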
# # Creating the model:
# Now that we have all the data we need, we can start building the model.
#
# First of all, we need to prepare the data for the actual model. Classification algorithms work only with numbers or True/False values. For example, the model can't use the Sex column at the moment because that field contains text. What we can do is transform the values of this feature into True or False (IsMale = True for males and IsMale = False for females).
#
# For this purpose we will use two methods: transforming the data into numerical values and creating dummy variables.
#
# Let's start with Sex and the transformation:
# In[ ]:
train['Sex'] = train['Sex'].astype('category').cat.codes
test['Sex'] = test['Sex'].astype('category').cat.codes
train[['Name', 'Sex']].head()
# As we see, the Sex column is now binary and takes 1 for males and 0 for females. Now classifiers will be able to work with it.
#
# Now we will transform Embarked column, but with a different method:
# In[ ]:
embarkedCat = pd.get_dummies(train['Embarked'])
train = pd.concat([train, embarkedCat], axis=1)
train.drop('Embarked', axis=1, inplace=True)
embarkedCat = pd.get_dummies(test['Embarked'])
test = pd.concat([test, embarkedCat], axis=1)
test.drop('Embarked', axis=1, inplace=True)
train[['Q', 'S', 'C']].head()
# We used dummies, which replaced the Embarked column with three new columns corresponding to the values in the old column. Let's do the same for the family size and age categories:
# In[ ]:
# for the train set
familyCat = pd.get_dummies(train['FamilyCategory'])
train = pd.concat([train, familyCat], axis=1)
train.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(train['AgeCategory'])
train = pd.concat([train, ageCat], axis=1)
train.drop('AgeCategory', axis=1, inplace=True)
#and for the test
familyCat = pd.get_dummies(test['FamilyCategory'])
test = pd.concat([test, familyCat], axis=1)
test.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(test['AgeCategory'])
test = pd.concat([test, ageCat], axis=1)
test.drop('AgeCategory', axis=1, inplace=True)
# In[ ]:
plt.figure(figsize=(14,12))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# # Modelling
# Now we need to select a classification algorithm for the model. There are plenty of decent classifiers, but which is the best for this task and which one should we choose?
#
# *Here's the idea:* we will take a bunch of classifiers, test them on the data, and choose the best one.
#
# In order to do that, we will create a list of different classifiers and see how each of them performs on the training data. To select the best one, we will evaluate them using cross-validation and compare their accuracy scores (percentage of the right answers). I decided to use Random Forest, KNN, SVC, Decision Tree, AdaBoost, Gradient Boost, Extremely Randomized Trees, and Logistic Regression.
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
classifiers = [
RandomForestClassifier(),
KNeighborsClassifier(),
SVC(),
DecisionTreeClassifier(),
AdaBoostClassifier(),
GradientBoostingClassifier(),
ExtraTreesClassifier(),
LogisticRegression()
]
# Now we need to select the features that will be used in the model and drop everything else. Also, the training data has to be split into two parts: *X_train* is the data the classifiers will be trained on, and *y_train* contains the answers.
# In[ ]:
X_train = train.drop(['PassengerId', 'Survived', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
y_train = train['Survived']
X_final = test.drop(['PassengerId', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
# We will use K-Folds as cross-validation. It splits the data into "folds", ** (...) **
# In[ ]:
from sklearn.model_selection import KFold
# n_splits=5
cv_kfold = KFold(n_splits=10)
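# To make the splitting concrete, here is a small illustrative loop (not part of the original analysis) that shows the sizes of the index arrays each fold produces:
# In[ ]:
# Illustration only: each iteration yields indices for the fitting part and the held-out fold.
for fold, (fit_idx, val_idx) in enumerate(cv_kfold.split(train)):
    print('fold', fold, '- fit size:', len(fit_idx), '- validation size:', len(val_idx))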
# Now we evaluate each of the classifiers from the list using K-Folds. The accuracy scores will be stored in a list.
#
# The problem is that K-Folds evaluates each algorithm several times. As a result, we will have a list of arrays of scores for each classifier, which is not great for comparison.
#
# To fix it, we will create another list of means of scores for each classifier. That way it will be much easier to compare the algorithms and select the best one.
# In[ ]:
from sklearn.model_selection import cross_val_score
class_scores = []
for classifier in classifiers:
class_scores.append(cross_val_score(classifier, X_train, y_train, scoring='accuracy', cv=cv_kfold))
class_mean_scores = []
for score in class_scores:
class_mean_scores.append(score.mean())
# Now that we have the mean accuracy scores, we need to compare them somehow. But since it's just a list of numbers, we can easily plot them. First, let's create a data frame of classifiers names and their scores, and then plot it:
# In[ ]:
scores_df = pd.DataFrame({
'Classifier':['Random Forest', 'KNeighbors', 'SVC', 'DecisionTreeClassifier', 'AdaBoostClassifier',
'GradientBoostingClassifier', 'ExtraTreesClassifier', 'LogisticRegression'],
'Scores': class_mean_scores
})
print(scores_df)
sns.factorplot('Scores', 'Classifier', data=scores_df, size=6)
# The two best classifiers happened to be Gradient Boosting and Logistic Regression. Since Logistic Regression got a slightly lower score and is rather easily overfitted, we will use Gradient Boosting.
# ### Selecting the parameters
# Now that we've chosen the algorithm, we need to select the best parameters for it. There are many options, and sometimes it's almost impossible to know the best set of parameters. That's why we will use Grid Search to test out different options and choose the best ones.
#
# But first let's take a look at all the possible parameters of Gradient Boosting classifier:
# In[ ]:
g_boost = GradientBoostingClassifier()
g_boost.get_params().keys()
# We will test different options for min_samples_leaf, min_samples_split, max_depth, and loss parameters. I will set n_estimators to 100, but it can be increased since Gradient Boosting algorithms generally don't tend to overfit.
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = {
'loss': ['deviance', 'exponential'],
'min_samples_leaf': [2, 5, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100],
'max_depth': [3, 5, 10, 20]
}
grid_cv = GridSearchCV(g_boost, param_grid, scoring='accuracy', cv=cv_kfold)
grid_cv.fit(X_train, y_train)
grid_cv.best_estimator_
# In[ ]:
print(grid_cv.best_score_)
print(grid_cv.best_params_)
# Now that we have the best parameters we could find, it's time to create and train the model on the training data. The parameters below are set manually, with n_estimators raised to 1000 as discussed above.
# In[ ]:
g_boost = GradientBoostingClassifier(min_samples_split=5, loss='deviance', n_estimators=1000,
max_depth=3, min_samples_leaf=2)
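# An equivalent way, assuming the grid search above has been run, is to unpack the found parameters directly; note that this keeps n_estimators=100 from param_grid, unlike the manual setup above:
# In[ ]:
g_boost_from_grid = GradientBoostingClassifier(**grid_cv.best_params_)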
# In[ ]:
g_boost.fit(X_train, y_train)
# In[ ]:
feature_values = pd.DataFrame({
'Feature': X_final.columns,
'Importance': g_boost.feature_importances_
})
print(feature_values)
sns.factorplot('Importance', 'Feature', data=feature_values, size=6) | # ### Prediction on the testing set and output
# Now our model is ready, and we can make a prediction on the testing set and create a .csv output for submission.
# In[ ]:
prediction = g_boost.predict(X_final)
# In[ ]:
submission = pd.DataFrame({
'PassengerId': test['PassengerId'],
'Survived': prediction
})
# In[ ]:
#submission.to_csv('submission.csv', index=False) | random_line_split |
|
getdata.py | """
These are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import urllib
try:
# For Python 3.0 and later
import urllib.request
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import re
import glob
import os
import arcpy
from arcpy.sa import *
def get_modis(tiles, save_path, months='', years=''):
"""The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.
:param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
:param save_path: name of output file name
:param months: months of interest; defaults to [1,12]
:param years: years of interest; defaults to [2000,2015]
:return: saves files in outpath
"""
from bs4 import BeautifulSoup
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for tile in tiles:
for yr in yrs:
for m in mons:
base_url = "http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/"
dir_path = "Y{:}/M{:}/".format(yr, m)
url = base_url + dir_path
soup = BeautifulSoup(urllib2.urlopen(url), "lxml")
hdf_name = soup.find_all('', {
'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})
files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)
print(save_path + hdf_name[0].text)
time.sleep(0.5)
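# Illustrative call only -- the tile IDs, folder, and ranges below are assumptions, not values taken from this project:
# get_modis(['h09v04', 'h09v05'], 'H:/GIS/MODIS/raw/', months=[1, 12], years=[2000, 2014])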
def get_file_list(save_path, wld='*.105*.hdf'):
"""
Args:
save_path: path to folder where raw MODIS files are
wld: common wildcard in all of the raw MODIS files
Returns:
list of files to analyze in the raw folder
"""
return glob.glob(os.path.join(save_path, wld))
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
"""Iterates through MODIS files in a folder reprojecting them.
Takes the crazy MODIS sinusoidal projection to a user defined projection.
Args:
files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
save_path: folder to store the reprojected files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
eight_day: time span of modis file; Bool where default is true (input 8-day rasters)
proj: projection of output data by epsg number; default is nad83 zone 12
Returns:
Reprojected MODIS files
..notes:
The EPSG code for NAD83 Zone 12 is 26912.
The EPSG code for Albers Equal Area is 102003
http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<
https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
def clip_and_fix(path, outpath, data_type, area=''):
"""Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
|
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory to unconverted modis tiles
:param out_path: directory to put output in
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
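# Sketch of how the MODIS16 helpers above chain together; the folders and data type
# are illustrative assumptions, not this project's actual directories:
# files = get_file_list('H:/GIS/MODIS/raw/')
# reproject_modis(files, 'H:/GIS/MODIS', 'ET', eight_day=False)
# clip_and_fix('H:/GIS/MODIS/ET/', 'H:/GIS/MODIS.gdb/', 'ET')
# merge_rasts('H:/GIS/MODIS.gdb/', data_type='ET')
# scale_modis('H:/GIS/MODIS.gdb/', 'H:/GIS/Calc.gdb/', data_type='ET')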
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: filelist -- the list of all extracted files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
def ungz(filepath, compression='rb', deletesource=False):
"""
    Given an input gz archive filepath, extracts the file.
    Required: filepath -- the path to the gz archive
    Optional: compression -- the file mode used to read the archive; DEFAULT is 'rb'
              deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: the path of the extracted file
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
    # hdr file replacement string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
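# Illustrative use of the two helpers above on a downloaded .bil.gz raster (the file name is hypothetical):
# bil_path = ungz('H:/GIS/raw/raster_201001.bil.gz')
# replace_hdr_file(bil_path.replace('.bil', '.hdr'))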
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir: directory to store downloaded SNODAS zip files
:param months: months desired for download
:param years: years desired for download
:return: saved zip files in out_dir
.. note:
Use polaris: http://nsidc.org/data/polaris/
"""
import ftplib
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for yr in yrs:
for m in mons:
ftp_addr = "sidads.colorado.edu"
ftp = ftplib.FTP(ftp_addr)
ftp.login()
dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
ftp.cwd(dir_path)
files = ftp.nlst()
for f in files:
if len(f) > 4:
save_file = open(out_dir + "/" + f, 'wb')
ftp.retrbinary("RETR " + f, save_file.write)
save_file.close()
print(f)
ftp.close()
def rename_polaris_snodas(path):
prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',
'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
for filename in os.listdir(path):
if filename.startswith("us_ssmv"):
code = prodcode[filename[0:17]]
yrsrt = filename.find('TNATS') + 5
yr = filename[yrsrt:yrsrt + 4]
mo = filename[yrsrt + 4:yrsrt + 6]
dy = filename[yrsrt + 6:yrsrt + 8]
try:
os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))
except:
pass
def snow_summary(code, scalingFactor, statistics="SUM", outcellsize='1000', monthRange='', yearRange='',
path="H:/GIS/SNODAS/SNWDS/", outpath="H:/GIS/SNODAS.gdb/", area=''):
"""
summarizes daily SNODAS data to monthly values
INPUT
-----
code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
RANGE, STD, SUM, or VARIETY
monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
path = directory where raw geoTiffs are located
outpath = directory where final data will be stored
OUTPUT
------
projected and scaled monthly rasters
"""
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
g = {}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
# arcpy.env.mask = area
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
for y in range(yearRange[0], yearRange[1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[1] + 1): # set months converted here
g[code + str(y) + str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year
for name in sorted(
glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type
rast = os.path.basename(name)
if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:
g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + str(y) + str(m).zfill(2)]) > 0:
# print(g[code+str(y)+str(m).zfill(2)])
# ifnull = 'in_memory/ifnull'
# arcpy sa functions that summarize the daily data to monthly data
cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,
ignore_nodata="DATA")
div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001
calc = Con(div < 0.0, 0.0, div) # remove negative and null values
ifnull = Con(IsNull(calc), 0, calc) # remove null
# WKID 102039
outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis
# define save path for file
outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]
memoryFeature = "in_memory/myMemoryFeature"
# memoryFeature = outnm
arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,
'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')
# Execute ExtractByMask to clip snodas data to Utah watersheds
extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
extrc.save(outnm)
print(outnm)
arcpy.Delete_management("in_memory")
def totalavg(code, statistics="MEAN", monthRange=[1, 12], yearRange=[2003, 2016],
path="H:/GIS/SNODAS/SNODASproj.gdb/", outpath="H:/GIS/SNODAS/SNODASproj.gdb/"):
"""Summarizes daily raster data into monthly data.
INPUT
-----
    code = string with four letters representing the data type to summarize (example 'BSSB')
statistics = how data will be summarized; defaults to monthly averages; options are
['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
Most common are 'MEAN','MEDIAN', and 'SUM'
These are inputs that will be used in the ArcPy CellStatistics function.
See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
monthRange = beginning and end months of summary statistics
yearRange = beginning and end years of summary statistics
path = location of geodatabase of data to summarize
outpath = location of geodatabase where output data should be stored
OUTPUT
------
summary raster(s) stored in outpath
"""
g = {}
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
# iterate over month range set here; default is 1 to 12 (Jan to Dec)
for m in range(monthRange[0], monthRange[1] + 1):
# this defines the dictionary key based on data type, month, and year
g[code + '0000' + str(m).zfill(2)] = []
# pick all tiff files from raw data folder of a data type
for rast in arcpy.ListRasters():
yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here
# create a list of rasters with the right code and month and year
if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:
g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + '0000' + str(m).zfill(2)]) > 0:
# arcpy sa functions that summarize the daily data to monthly data
calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata="DATA")
calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
print(code + '0000' + str(m).zfill(2) + statstype[statistics])
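# Example SNODAS sequence using the functions above; the paths, product code, and
# scaling factor are illustrative assumptions (see the SNODAS table for real factors):
# get_snodas('H:/GIS/SNODAS/zips', months=[1, 12], years=[2004, 2006])
# rename_polaris_snodas('H:/GIS/SNODAS/SNWDS')
# snow_summary('SWEQ', 1000.0, statistics='SUM')
# totalavg('SWEQ', statistics='MEAN')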
if __name__ == '__main__':
main()
| area = 'H:/GIS/Calc.gdb/WBD_UT' | conditional_block |
getdata.py | """
These are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import urllib
try:
# For Python 3.0 and later
import urllib.request
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import re
import glob
import os
import arcpy
from arcpy.sa import *
def get_modis(tiles, save_path, months='', years=''):
"""The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.
:param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
:param save_path: name of output file name
:param months: months of interest; defaults to [1,12]
:param years: years of interest; defaults to [2000,2015]
:return: saves files in outpath
"""
from bs4 import BeautifulSoup
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for tile in tiles:
for yr in yrs:
for m in mons:
base_url = "http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/"
dir_path = "Y{:}/M{:}/".format(yr, m)
url = base_url + dir_path
soup = BeautifulSoup(urllib2.urlopen(url), "lxml")
hdf_name = soup.find_all('', {
'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})
files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)
print(save_path + hdf_name[0].text)
time.sleep(0.5)
def | (save_path, wld='*.105*.hdf'):
"""
Args:
save_path: path to folder where raw MODIS files are
wld: common wildcard in all of the raw MODIS files
Returns:
list of files to analyze in the raw folder
"""
return glob.glob(os.path.join(save_path, wld))
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
"""Iterates through MODIS files in a folder reprojecting them.
Takes the crazy MODIS sinusoidal projection to a user defined projection.
Args:
files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
save_path: folder to store the reprojected files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
eight_day: time span of modis file; Bool where default is true (input 8-day rasters)
proj: projection of output data by epsg number; default is nad83 zone 12
Returns:
Reprojected MODIS files
..notes:
The EPSG code for NAD83 Zone 12 is 26912.
The EPSG code for Albers Equal Area is 102003
http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<
https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
def clip_and_fix(path, outpath, data_type, area=''):
"""Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory to unconverted modis tiles
:param out_path: directory to put output in
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: filelist -- the list of all extracted files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
def ungz(filepath, compression='rb', deletesource=False):
"""
    Given an input gz archive filepath, extracts the file.
    Required: filepath -- the path to the gz archive
    Optional: compression -- the file mode used to read the archive; DEFAULT is 'rb'
              deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: the path of the extracted file
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
    # hdr file replacement string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir: directory to store downloaded SNODAS zip files
:param months: months desired for download
:param years: years desired for download
:return: saved zip files in out_dir
.. note:
Use polaris: http://nsidc.org/data/polaris/
"""
import ftplib
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for yr in yrs:
for m in mons:
ftp_addr = "sidads.colorado.edu"
ftp = ftplib.FTP(ftp_addr)
ftp.login()
dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
ftp.cwd(dir_path)
files = ftp.nlst()
for f in files:
if len(f) > 4:
save_file = open(out_dir + "/" + f, 'wb')
ftp.retrbinary("RETR " + f, save_file.write)
save_file.close()
print(f)
ftp.close()
def rename_polaris_snodas(path):
prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',
'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
for filename in os.listdir(path):
if filename.startswith("us_ssmv"):
code = prodcode[filename[0:17]]
yrsrt = filename.find('TNATS') + 5
yr = filename[yrsrt:yrsrt + 4]
mo = filename[yrsrt + 4:yrsrt + 6]
dy = filename[yrsrt + 6:yrsrt + 8]
try:
os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))
except:
pass
def snow_summary(code, scalingFactor, statistics="SUM", outcellsize='1000', monthRange='', yearRange='',
path="H:/GIS/SNODAS/SNWDS/", outpath="H:/GIS/SNODAS.gdb/", area=''):
"""
summarizes daily SNODAS data to monthly values
INPUT
-----
code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
RANGE, STD, SUM, or VARIETY
monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
path = directory where raw geoTiffs are located
outpath = directory where final data will be stored
OUTPUT
------
projected and scaled monthly rasters
"""
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
g = {}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
# arcpy.env.mask = area
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
for y in range(yearRange[0], yearRange[1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[1] + 1): # set months converted here
g[code + str(y) + str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year
for name in sorted(
glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type
rast = os.path.basename(name)
if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:
g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + str(y) + str(m).zfill(2)]) > 0:
# print(g[code+str(y)+str(m).zfill(2)])
# ifnull = 'in_memory/ifnull'
# arcpy sa functions that summarize the daily data to monthly data
cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,
ignore_nodata="DATA")
div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001
calc = Con(div < 0.0, 0.0, div) # remove negative and null values
ifnull = Con(IsNull(calc), 0, calc) # remove null
# WKID 102039
outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis
# define save path for file
outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]
memoryFeature = "in_memory/myMemoryFeature"
# memoryFeature = outnm
arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,
'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')
# Execute ExtractByMask to clip snodas data to Utah watersheds
extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
extrc.save(outnm)
print(outnm)
arcpy.Delete_management("in_memory")
def totalavg(code, statistics="MEAN", monthRange=[1, 12], yearRange=[2003, 2016],
path="H:/GIS/SNODAS/SNODASproj.gdb/", outpath="H:/GIS/SNODAS/SNODASproj.gdb/"):
"""Summarizes daily raster data into monthly data.
INPUT
-----
    code = string with four letters representing the data type to summarize (example 'BSSB')
statistics = how data will be summarized; defaults to monthly averages; options are
['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
Most common are 'MEAN','MEDIAN', and 'SUM'
These are inputs that will be used in the ArcPy CellStatistics function.
See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
monthRange = beginning and end months of summary statistics
yearRange = beginning and end years of summary statistics
path = location of geodatabase of data to summarize
outpath = location of geodatabase where output data should be stored
OUTPUT
------
summary raster(s) stored in outpath
"""
g = {}
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
# iterate over month range set here; default is 1 to 12 (Jan to Dec)
for m in range(monthRange[0], monthRange[1] + 1):
# this defines the dictionary key based on data type, month, and year
g[code + '0000' + str(m).zfill(2)] = []
# pick all tiff files from raw data folder of a data type
for rast in arcpy.ListRasters():
yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here
# create a list of rasters with the right code and month and year
if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:
g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + '0000' + str(m).zfill(2)]) > 0:
# arcpy sa functions that summarize the daily data to monthly data
calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata="DATA")
calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
print(code + '0000' + str(m).zfill(2) + statstype[statistics])
if __name__ == '__main__':
main()
| get_file_list | identifier_name |
getdata.py | """
These are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import urllib
try:
# For Python 3.0 and later
import urllib.request
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import re
import glob
import os
import arcpy
from arcpy.sa import *
def get_modis(tiles, save_path, months='', years=''):
"""The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.
:param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
:param save_path: name of output file name
:param months: months of interest; defaults to [1,12]
:param years: years of interest; defaults to [2000,2015]
:return: saves files in outpath
"""
from bs4 import BeautifulSoup
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for tile in tiles:
for yr in yrs:
for m in mons:
base_url = "http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/"
dir_path = "Y{:}/M{:}/".format(yr, m)
url = base_url + dir_path
soup = BeautifulSoup(urllib2.urlopen(url), "lxml")
hdf_name = soup.find_all('', {
'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})
files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)
print(save_path + hdf_name[0].text)
time.sleep(0.5)
def get_file_list(save_path, wld='*.105*.hdf'):
"""
Args:
save_path: path to folder where raw MODIS files are
wld: common wildcard in all of the raw MODIS files
Returns:
list of files to analyze in the raw folder
"""
return glob.glob(os.path.join(save_path, wld))
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
"""Iterates through MODIS files in a folder reprojecting them.
Takes the crazy MODIS sinusoidal projection to a user defined projection.
Args:
files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
save_path: folder to store the reprojected files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
eight_day: time span of modis file; Bool where default is true (input 8-day rasters)
proj: projection of output data by epsg number; default is nad83 zone 12
Returns:
Reprojected MODIS files
..notes:
The EPSG code for NAD83 Zone 12 is 26912.
The EPSG code for Albers Equal Area is 102003
http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<
https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
def clip_and_fix(path, outpath, data_type, area=''):
"""Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory to unconverted modis tiles
:param out_path: directory to put output in
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: filelist -- the list of all extracted files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
def ungz(filepath, compression='rb', deletesource=False):
"""
    Given an input gz archive filepath, extracts the file.
    Required: filepath -- the path to the gz archive
    Optional: compression -- the file mode used to read the archive; DEFAULT is 'rb'
              deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: the path of the extracted file
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
    # hdr file replacement string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir: directory to store downloaded SNODAS zip files
:param months: months desired for download
:param years: years desired for download
:return: saved zip files in out_dir
.. note:
Use polaris: http://nsidc.org/data/polaris/
"""
import ftplib
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for yr in yrs:
for m in mons:
ftp_addr = "sidads.colorado.edu"
ftp = ftplib.FTP(ftp_addr)
ftp.login()
dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
ftp.cwd(dir_path)
files = ftp.nlst()
for f in files:
if len(f) > 4:
save_file = open(out_dir + "/" + f, 'wb')
ftp.retrbinary("RETR " + f, save_file.write)
save_file.close()
print(f)
ftp.close()
| prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',
'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
for filename in os.listdir(path):
if filename.startswith("us_ssmv"):
code = prodcode[filename[0:17]]
yrsrt = filename.find('TNATS') + 5
yr = filename[yrsrt:yrsrt + 4]
mo = filename[yrsrt + 4:yrsrt + 6]
dy = filename[yrsrt + 6:yrsrt + 8]
try:
os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))
except:
pass
def snow_summary(code, scalingFactor, statistics="SUM", outcellsize='1000', monthRange='', yearRange='',
path="H:/GIS/SNODAS/SNWDS/", outpath="H:/GIS/SNODAS.gdb/", area=''):
"""
summarizes daily SNODAS data to monthly values
INPUT
-----
code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
RANGE, STD, SUM, or VARIETY
monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
path = directory where raw geoTiffs are located
outpath = directory where final data will be stored
OUTPUT
------
projected and scaled monthly rasters
"""
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
g = {}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
# arcpy.env.mask = area
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
for y in range(yearRange[0], yearRange[1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[1] + 1): # set months converted here
g[code + str(y) + str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year
for name in sorted(
glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type
rast = os.path.basename(name)
if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:
g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + str(y) + str(m).zfill(2)]) > 0:
# print(g[code+str(y)+str(m).zfill(2)])
# ifnull = 'in_memory/ifnull'
# arcpy sa functions that summarize the daily data to monthly data
cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,
ignore_nodata="DATA")
div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001
calc = Con(div < 0.0, 0.0, div) # remove negative and null values
ifnull = Con(IsNull(calc), 0, calc) # remove null
# WKID 102039
outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis
# define save path for file
outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]
memoryFeature = "in_memory/myMemoryFeature"
# memoryFeature = outnm
arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,
'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')
# Execute ExtractByMask to clip snodas data to Utah watersheds
extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
extrc.save(outnm)
print(outnm)
arcpy.Delete_management("in_memory")
def totalavg(code, statistics="MEAN", monthRange=[1, 12], yearRange=[2003, 2016],
path="H:/GIS/SNODAS/SNODASproj.gdb/", outpath="H:/GIS/SNODAS/SNODASproj.gdb/"):
"""Summarizes daily raster data into monthly data.
INPUT
-----
code = string with four letters representing data type to summarize (example 'BSSB')
statistics = how data will be summarized; defaults to monthly averages; options are
['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
Most common are 'MEAN','MEDIAN', and 'SUM'
These are inputs that will be used in the ArcPy CellStatistics function.
See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
monthRange = beginning and end months of summary statistics
yearRange = beginning and end years of summary statistics
path = location of geodatabase of data to summarize
outpath = location of geodatabase where output data should be stored
OUTPUT
------
summary raster(s) stored in outpath
"""
g = {}
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
# iterate over month range set here; default is 1 to 12 (Jan to Dec)
for m in range(monthRange[0], monthRange[1] + 1):
# this defines the dictionary key based on data type, month, and year
g[code + '0000' + str(m).zfill(2)] = []
# pick all tiff files from raw data folder of a data type
for rast in arcpy.ListRasters():
yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here
# create a list of rasters with the right code and month and year
if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:
g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + '0000' + str(m).zfill(2)]) > 0:
# arcpy sa functions that summarize the daily data to monthly data
calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata="DATA")
calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
print(code + '0000' + str(m).zfill(2) + statstype[statistics])
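# Usage sketch for the long-term summary above (paths come from the function
# defaults; note that main() itself is not defined in this module as shown):
#   totalavg('SNOD', statistics='MEAN', monthRange=[4, 4])
# would save SNOD000004AVG, the mean April snow depth across 2003-2016.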
if __name__ == '__main__':
main() | def rename_polaris_snodas(path): | random_line_split |
getdata.py | """
These are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import urllib
try:
# For Python 3.0 and later
import urllib.request
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import re
import glob
import os
import arcpy
from arcpy.sa import *
def get_modis(tiles, save_path, months='', years=''):
"""The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.
:param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
:param save_path: name of output file name
:param months: months of interest; defaults to [1,12]
:param years: years of interest; defaults to [2000,2015]
:return: saves files in outpath
"""
from bs4 import BeautifulSoup
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for tile in tiles:
for yr in yrs:
for m in mons:
base_url = "http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/"
dir_path = "Y{:}/M{:}/".format(yr, m)
url = base_url + dir_path
soup = BeautifulSoup(urllib2.urlopen(url), "lxml")
hdf_name = soup.find_all('', {
'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})
files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)
print(save_path + hdf_name[0].text)
time.sleep(0.5)
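# Example call (tile numbers and output folder are placeholders, not prescribed values):
#   get_modis(['h09v04', 'h09v05'], 'H:/GIS/MODIS/', months=[1, 12], years=[2000, 2014])
# downloads the monthly MOD16A2 HDF tiles that the helpers below list, reproject and clip.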
def get_file_list(save_path, wld='*.105*.hdf'):
"""
Args:
save_path: path to folder where raw MODIS files are
wld: common wildcard in all of the raw MODIS files
Returns:
list of files to analyze in the raw folder
"""
return glob.glob(os.path.join(save_path, wld))
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
"""Iterates through MODIS files in a folder reprojecting them.
Takes the crazy MODIS sinusoidal projection to a user defined projection.
Args:
files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
save_path: folder to store the reprojected files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
eight_day: time span of modis file; Bool where default is true (input 8-day rasters)
proj: projection of output data as an EPSG/ESRI numeric code; default 102003 is USA Contiguous Albers Equal Area
Returns:
Reprojected MODIS files
..notes:
The EPSG code for NAD83 UTM Zone 12N is 26912.
The ESRI WKID for USA Contiguous Albers Equal Area is 102003
http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006
https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
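# Example (hypothetical folders): list the raw HDF files and reproject the monthly ET
# layer to the default USA Contiguous Albers projection:
#   hdfs = get_file_list('H:/GIS/MODIS/', '*.105*.hdf')
#   reproject_modis(hdfs, 'H:/GIS/MODIS/', 'ET', eight_day=False)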
def clip_and_fix(path, outpath, data_type, area=''):
|
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory to unconverted modis tiles
:param out_path: directory to put output in
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
Output: filelist -- the list of all extracted files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
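# Example (hypothetical archive path): unpack a daily SNODAS tarball and keep the source:
#   members = untar('H:/GIS/SNODAS/zipped/SNODAS_20141201.tar', outfoldername='H:/GIS/SNODAS/SNWDS')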
def ungz(filepath, compression='rb', deletesource=False):
"""
Given an input gz archive filepath, extracts the files.
Required: filepath -- the path to the gz archive
Optional: compression -- mode used to open the gz archive; DEFAULT is 'rb'
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
Output: the filepath of the extracted file
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
# hdr file replacment string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir: directory to store downloaded SNODAS zip files
:param months: months desired for download
:param years: years desired for download
:return: saved zip files in out_dir
.. note:
Use polaris: http://nsidc.org/data/polaris/
"""
import ftplib
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for yr in yrs:
for m in mons:
ftp_addr = "sidads.colorado.edu"
ftp = ftplib.FTP(ftp_addr)
ftp.login()
dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
ftp.cwd(dir_path)
files = ftp.nlst()
for f in files:
if len(f) > 4:
save_file = open(out_dir + "/" + f, 'wb')
ftp.retrbinary("RETR " + f, save_file.write)
save_file.close()
print(f)
ftp.close()
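# Example (output directory is a placeholder): fetch the masked SNODAS archives, which can
# then be unpacked with untar()/ungz() and renamed with rename_polaris_snodas() below:
#   get_snodas('H:/GIS/SNODAS/zipped', months=[1, 12], years=[2004, 2015])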
def rename_polaris_snodas(path):
prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',
'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
for filename in os.listdir(path):
if filename.startswith("us_ssmv"):
code = prodcode[filename[0:17]]
yrsrt = filename.find('TNATS') + 5
yr = filename[yrsrt:yrsrt + 4]
mo = filename[yrsrt + 4:yrsrt + 6]
dy = filename[yrsrt + 6:yrsrt + 8]
try:
os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))
except:
pass
def snow_summary(code, scalingFactor, statistics="SUM", outcellsize='1000', monthRange='', yearRange='',
path="H:/GIS/SNODAS/SNWDS/", outpath="H:/GIS/SNODAS.gdb/", area=''):
"""
summarizes daily SNODAS data to monthly values
INPUT
-----
code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
RANGE, STD, SUM, or VARIETY
monthRange = len 2 list; begin and end month of data you wish to analyze
yearRange = len 2 list; begin and end year of data you wish to analyze
path = directory where raw geoTiffs are located
outpath = directory where final data will be stored
OUTPUT
------
projected and scaled monthly rasters
"""
if monthRange == '':
monthRange = [1, 12]  # default to the full year when no range is given
if yearRange == '':
yearRange = [2000, 2015]  # default period of record
g = {}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
# arcpy.env.mask = area
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
for y in range(yearRange[0], yearRange[1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[1] + 1): # set months converted here
g[code + str(y) + str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year
for name in sorted(
glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type
rast = os.path.basename(name)
if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:
g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + str(y) + str(m).zfill(2)]) > 0:
# print(g[code+str(y)+str(m).zfill(2)])
# ifnull = 'in_memory/ifnull'
# arcpy sa functions that summarize the daily data to monthly data
cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,
ignore_nodata="DATA")
div = Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001
calc = Con(div < 0.0, 0.0, div) # remove negative and null values
ifnull = Con(IsNull(calc), 0, calc) # remove null
# WKID 102039
outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis
# define save path for file
outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]
memoryFeature = "in_memory/myMemoryFeature"
# memoryFeature = outnm
arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,
'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')
# Execute ExtractByMask to clip snodas data to Utah watersheds
extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
extrc.save(outnm)
print(outnm)
arcpy.Delete_management("in_memory")
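# Example (a sketch; take the scaling factor from table 1 of the SNODAS documentation
# linked in the docstring -- 1000 turns millimetres of SWE into metres):
#   snow_summary('SWEQ', 1000.0, statistics='SUM', monthRange=[1, 12], yearRange=[2004, 2015])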
def totalavg(code, statistics="MEAN", monthRange=[1, 12], yearRange=[2003, 2016],
path="H:/GIS/SNODAS/SNODASproj.gdb/", outpath="H:/GIS/SNODAS/SNODASproj.gdb/"):
"""Summarizes daily raster data into monthly data.
INPUT
-----
code = string with four letters representing data type to summarize (example 'BSSB')
statistics = how data will be summarized; defaults to monthly averages; options are
['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
Most common are 'MEAN','MEDIAN', and 'SUM'
These are inputs that will be used in the ArcPy CellStatistics function.
See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
monthRange = beginning and end months of summary statistics
yearRange = beginning and end years of summary statistics
path = location of geodatabase of data to summarize
outpath = location of geodatabase where output data should be stored
OUTPUT
------
summary raster(s) stored in outpath
"""
g = {}
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
# iterate over month range set here; default is 1 to 12 (Jan to Dec)
for m in range(monthRange[0], monthRange[1] + 1):
# this defines the dictionary key based on data type, month, and year
g[code + '0000' + str(m).zfill(2)] = []
# pick all tiff files from raw data folder of a data type
for rast in arcpy.ListRasters():
yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here
# create a list of rasters with the right code and month and year
if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:
g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + '0000' + str(m).zfill(2)]) > 0:
# arcpy sa functions that summarize the daily data to monthly data
calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata="DATA")
calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
print(code + '0000' + str(m).zfill(2) + statstype[statistics])
if __name__ == '__main__':
main()
| """Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14]) | identifier_body |
nfa2regex.py | from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
from functools import reduce  # used below; not a builtin in Python 3
DEBUG = False
LAMBDA = u'\u03bb'
PHI = u'\u00d8'
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
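# nfa2regex() below is a state-elimination (GNFA) construction:
#   1. add dummy start/final states joined to the old ones by lambda ('') transitions,
#   2. collapse parallel edges between a pair of states into one '+'-joined label,
#   3. remove interior states one at a time (farthest from the final state first),
#      rewriting each path through a removed state as in-label (loop-label)* out-label,
#   4. read the regex off the remaining start edges, factoring out any common prefix/suffix.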
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
|
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
# Remove delta
try:
del network._deltas[node]
except KeyError: # No deltas remaining, had only loops
continue
if DEBUG:
print('Working on connections: ', node, delta)
# Check all possible connections through this node
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and (input + loopRegex + input2) == ''):
network.addDelta(
src, input + loopRegex + input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
# Extract common prefix/suffix
branches = list(network.getDelta(network.start).keys())  # list() so it can be indexed below
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + \
'(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
else:
regex = '+'.join([i or LAMBDA for i in branches]) or PHI
return regex
| for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node) | conditional_block |
nfa2regex.py | from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
from functools import reduce  # used below; not a builtin in Python 3
DEBUG = False
LAMBDA = u'\u03bb'
PHI = u'\u00d8'
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
|
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
# Remove delta
try:
del network._deltas[node]
except KeyError: # No deltas remaining, had only loops
continue
if DEBUG:
print('Working on connections: ', node, delta)
# Check all possible connections through this node
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and (input + loopRegex + input2) == ''):
network.addDelta(
src, input + loopRegex + input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
# Extract common prefix/suffix
branches = list(network.getDelta(network.start).keys())  # list() so it can be indexed below
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + \
'(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
else:
regex = '+'.join([i or LAMBDA for i in branches]) or PHI
return regex
| out = ''
for c in reversed(s):
out += c
return out | identifier_body |
nfa2regex.py | from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
from functools import reduce  # used below; not a builtin in Python 3
DEBUG = False
LAMBDA = u'\u03bb'
PHI = u'\u00d8'
def | (src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
# Remove delta
try:
del network._deltas[node]
except KeyError: # No deltas remaining, had only loops
continue
if DEBUG:
print('Working on connections: ', node, delta)
# Check all possible connections through this node
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and (input + loopRegex + input2) == ''):
network.addDelta(
src, input + loopRegex + input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
# Extract common prefix/suffix
branches = list(network.getDelta(network.start).keys())  # list() so it can be indexed below
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + \
'(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
else:
regex = '+'.join([i or LAMBDA for i in branches]) or PHI
return regex
| copyDeltas | identifier_name |
nfa2regex.py | from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
from functools import reduce  # used below; not a builtin in Python 3
DEBUG = False
LAMBDA = u'\u03bb'
PHI = u'\u00d8'
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))): | if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
# Remove delta
try:
del network._deltas[node]
except KeyError: # No deltas remaining, had only loops
continue
if DEBUG:
print('Working on connections: ', node, delta)
# Check all possible connections through this node
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and (input + loopRegex + input2) == ''):
network.addDelta(
src, input + loopRegex + input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
# Extract common prefix/suffix
branches = list(network.getDelta(network.start).keys())  # list() so it can be indexed below
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + \
'(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
else:
regex = '+'.join([i or LAMBDA for i in branches]) or PHI
return regex | raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node: | random_line_split |
metrics.py | import math
import sys
import os
import numpy as np
import requests
import zipfile
from collections import Counter
from clint.textui import progress
from metrics.bleu_metrics import BleuMetrics
from metrics.distinct_metrics import DistinctMetrics
from metrics.entropy_metrics import EntropyMetrics
from metrics.embedding_metrics import EmbeddingMetrics
from metrics.divergence_metrics import DivergenceMetrics
from metrics.coherence_metrics import CoherenceMetrics
from utils import utils
class Metrics:
def __init__(self, config):
'''
Params:
:config: A Config instance containing arguments.
'''
self.project_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
self.test_responses = os.path.join(self.project_path,
config.test_responses)
if not os.path.exists(self.test_responses):
print('Can\'t find test responses at ' + self.test_responses +
', please specify the path.')
sys.exit()
self.config = config
self.distro = {'uni': {}, 'bi': {}}
self.vocab = {}
# Save all filenames of test responses and build output path.
filenames = []
if os.path.isdir(self.test_responses):
self.input_dir = self.test_responses
self.output_path = os.path.join(self.test_responses, 'metrics.txt')
for filename in os.listdir(self.test_responses):
filenames.append(os.path.join(self.test_responses, filename))
else:
self.input_dir = '/'.join(self.test_responses.split('/')[:-1])
filenames.append(self.test_responses)
self.output_path = os.path.join(self.input_dir, 'metrics.txt')
# Initialize metrics and a bool dict for which metrics should be selected.
self.which_metrics = dict(config.metrics)
self.metrics = dict([(name, dict(
[(key, []) for key in config.metrics])) for name in filenames])
# Absolute path.
self.train_source = os.path.join(self.project_path, config.train_source)
self.test_source = os.path.join(self.project_path, config.test_source)
self.test_target = os.path.join(self.project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
print('Can\'t find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu'])
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' + | total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
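  # Sketch with made-up data: if train_source holds the lines 'a b b' and 'c a',
  # the written vocab.txt contains 'a', 'b', 'c' -- one unique token per line,
  # in first-seen order (Counter preserves insertion order).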
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
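    # NOTE (observation, not verified): download_fasttext() above fetches the
    # English wiki-news archive, so this 'cc.<lang>.300.vec' name may need to be
    # pointed at the extracted file for this existence check to succeed.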
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
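  # After build_vocab, self.vocab maps each token to a one-element list holding
  # its vector, e.g. (hypothetical) {'hello': [300-float array], 'rare': [np.zeros(300)]};
  # tokens without a pretrained vector fall back to a zero vector of length emb_dim.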
# Compute all metrics for all files.
def run(self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words)
sources.close()
gt_responses.close()
responses.close()
# Save individual metrics to self.metrics
for key in self.objects:
for metric_name, metric in self.objects[key].metrics.items():
self.metrics[filename][metric_name] = list(metric)
self.objects[key].metrics[metric_name].clear()
self.write_metrics()
# Compute mean, std and confidence, and write all metrics to output file.
def write_metrics(self):
with open(self.output_path, 'w') as output:
output.write('filename ')
output.write(' '.join([k for k, v in self.which_metrics.items() if v]))
output.write('\n')
''' The first row contains the names of the metrics, then each row
contains the name of the file and its metrics separated by spaces.
Each metric contains 3 numbers separated by ',': mean,std,confidence. '''
for filename, metrics in self.metrics.items():
output.write(filename.split('/')[-1] + ' ')
for metric_name, metric in metrics.items():
if self.which_metrics[metric_name]:
length = len(metric)
avg = sum(metric) / length
std = np.std(metric) if length > 1 else 0
confidence = self.config.t * std / math.sqrt(length)
# Write the metric to file.
m = str(avg) + ',' + str(std) + ',' + str(confidence)
output.write(m + ' ')
output.write('\n') | 'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file: | random_line_split |
metrics.py | import math
import sys
import os
import numpy as np
import requests
import zipfile
from collections import Counter
from clint.textui import progress
from metrics.bleu_metrics import BleuMetrics
from metrics.distinct_metrics import DistinctMetrics
from metrics.entropy_metrics import EntropyMetrics
from metrics.embedding_metrics import EmbeddingMetrics
from metrics.divergence_metrics import DivergenceMetrics
from metrics.coherence_metrics import CoherenceMetrics
from utils import utils
class Metrics:
def __init__(self, config):
'''
Params:
:config: A Config instance containing arguments.
'''
self.project_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
self.test_responses = os.path.join(self.project_path,
config.test_responses)
if not os.path.exists(self.test_responses):
      print('Can\'t find test responses at ' + self.test_responses +
', please specify the path.')
sys.exit()
self.config = config
self.distro = {'uni': {}, 'bi': {}}
self.vocab = {}
# Save all filenames of test responses and build output path.
filenames = []
if os.path.isdir(self.test_responses):
self.input_dir = self.test_responses
self.output_path = os.path.join(self.test_responses, 'metrics.txt')
for filename in os.listdir(self.test_responses):
filenames.append(os.path.join(self.test_responses, filename))
else:
self.input_dir = '/'.join(self.test_responses.split('/')[:-1])
filenames.append(self.test_responses)
self.output_path = os.path.join(self.input_dir, 'metrics.txt')
# Initialize metrics and a bool dict for which metrics should be selected.
self.which_metrics = dict(config.metrics)
self.metrics = dict([(name, dict(
[(key, []) for key in config.metrics])) for name in filenames])
# Absolute path.
self.train_source = os.path.join(self.project_path, config.train_source)
self.test_source = os.path.join(self.project_path, config.test_source)
self.test_target = os.path.join(self.project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
      print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
|
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file:
total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
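  # Format note (assumption based on the FastText .vec layout): every line
  # written to the vector vocab is plain text, a word followed by 300
  # space-separated floats, which is exactly what build_vocab below re-reads.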
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
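  # Example with hypothetical keys: delete_from_metrics(['embedding', 'coherence'])
  # zeroes every which_metrics entry whose name contains either substring,
  # such as 'embedding-average' or 'coherence', so they are skipped later on.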
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
# Compute all metrics for all files.
def run(self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words)
sources.close()
gt_responses.close()
responses.close()
# Save individual metrics to self.metrics
for key in self.objects:
for metric_name, metric in self.objects[key].metrics.items():
self.metrics[filename][metric_name] = list(metric)
self.objects[key].metrics[metric_name].clear()
self.write_metrics()
# Compute mean, std and confidence, and write all metrics to output file.
def write_metrics(self):
with open(self.output_path, 'w') as output:
output.write('filename ')
output.write(' '.join([k for k, v in self.which_metrics.items() if v]))
output.write('\n')
''' The first row contains the names of the metrics, then each row
contains the name of the file and its metrics separated by spaces.
Each metric contains 3 numbers separated by ',': mean,std,confidence. '''
for filename, metrics in self.metrics.items():
output.write(filename.split('/')[-1] + ' ')
for metric_name, metric in metrics.items():
if self.which_metrics[metric_name]:
length = len(metric)
avg = sum(metric) / length
std = np.std(metric) if length > 1 else 0
confidence = self.config.t * std / math.sqrt(length)
# Write the metric to file.
m = str(avg) + ',' + str(std) + ',' + str(confidence)
output.write(m + ' ')
output.write('\n')
| print('Can\' find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu']) | conditional_block |
metrics.py | import math
import sys
import os
import numpy as np
import requests
import zipfile
from collections import Counter
from clint.textui import progress
from metrics.bleu_metrics import BleuMetrics
from metrics.distinct_metrics import DistinctMetrics
from metrics.entropy_metrics import EntropyMetrics
from metrics.embedding_metrics import EmbeddingMetrics
from metrics.divergence_metrics import DivergenceMetrics
from metrics.coherence_metrics import CoherenceMetrics
from utils import utils
class Metrics:
def __init__(self, config):
'''
Params:
:config: A Config instance containing arguments.
'''
self.project_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
self.test_responses = os.path.join(self.project_path,
config.test_responses)
if not os.path.exists(self.test_responses):
      print('Can\'t find test responses at ' + self.test_responses +
', please specify the path.')
sys.exit()
self.config = config
self.distro = {'uni': {}, 'bi': {}}
self.vocab = {}
# Save all filenames of test responses and build output path.
filenames = []
if os.path.isdir(self.test_responses):
self.input_dir = self.test_responses
self.output_path = os.path.join(self.test_responses, 'metrics.txt')
for filename in os.listdir(self.test_responses):
filenames.append(os.path.join(self.test_responses, filename))
else:
self.input_dir = '/'.join(self.test_responses.split('/')[:-1])
filenames.append(self.test_responses)
self.output_path = os.path.join(self.input_dir, 'metrics.txt')
# Initialize metrics and a bool dict for which metrics should be selected.
self.which_metrics = dict(config.metrics)
self.metrics = dict([(name, dict(
[(key, []) for key in config.metrics])) for name in filenames])
# Absolute path.
self.train_source = os.path.join(self.project_path, config.train_source)
self.test_source = os.path.join(self.project_path, config.test_source)
self.test_target = os.path.join(self.project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
      print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
      print('Can\'t find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu'])
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
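  # Illustrative note (key names are assumptions): these_metrics('entropy') is
  # True if any switched-on entry of which_metrics contains 'entropy', e.g. a
  # hypothetical 'entropy-uni' key set to 1.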
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file:
total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
# Compute all metrics for all files.
def r | self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words)
sources.close()
gt_responses.close()
responses.close()
# Save individual metrics to self.metrics
for key in self.objects:
for metric_name, metric in self.objects[key].metrics.items():
self.metrics[filename][metric_name] = list(metric)
self.objects[key].metrics[metric_name].clear()
self.write_metrics()
# Compute mean, std and confidence, and write all metrics to output file.
def write_metrics(self):
with open(self.output_path, 'w') as output:
output.write('filename ')
output.write(' '.join([k for k, v in self.which_metrics.items() if v]))
output.write('\n')
''' The first row contains the names of the metrics, then each row
contains the name of the file and its metrics separated by spaces.
Each metric contains 3 numbers separated by ',': mean,std,confidence. '''
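      # Example row (hypothetical numbers): 'responses.txt 12.4,3.1,0.6 0.52,0.11,0.02'
      # i.e. the file name followed by one 'mean,std,confidence' triple per active metric.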
for filename, metrics in self.metrics.items():
output.write(filename.split('/')[-1] + ' ')
for metric_name, metric in metrics.items():
if self.which_metrics[metric_name]:
length = len(metric)
avg = sum(metric) / length
std = np.std(metric) if length > 1 else 0
confidence = self.config.t * std / math.sqrt(length)
# Write the metric to file.
m = str(avg) + ',' + str(std) + ',' + str(confidence)
output.write(m + ' ')
output.write('\n')
| un( | identifier_name |
metrics.py | import math
import sys
import os
import numpy as np
import requests
import zipfile
from collections import Counter
from clint.textui import progress
from metrics.bleu_metrics import BleuMetrics
from metrics.distinct_metrics import DistinctMetrics
from metrics.entropy_metrics import EntropyMetrics
from metrics.embedding_metrics import EmbeddingMetrics
from metrics.divergence_metrics import DivergenceMetrics
from metrics.coherence_metrics import CoherenceMetrics
from utils import utils
class Metrics:
def __init__(self, config):
'''
Params:
:config: A Config instance containing arguments.
'''
self.project_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
self.test_responses = os.path.join(self.project_path,
config.test_responses)
if not os.path.exists(self.test_responses):
      print('Can\'t find test responses at ' + self.test_responses +
', please specify the path.')
sys.exit()
self.config = config
self.distro = {'uni': {}, 'bi': {}}
self.vocab = {}
# Save all filenames of test responses and build output path.
filenames = []
if os.path.isdir(self.test_responses):
self.input_dir = self.test_responses
self.output_path = os.path.join(self.test_responses, 'metrics.txt')
for filename in os.listdir(self.test_responses):
filenames.append(os.path.join(self.test_responses, filename))
else:
self.input_dir = '/'.join(self.test_responses.split('/')[:-1])
filenames.append(self.test_responses)
self.output_path = os.path.join(self.input_dir, 'metrics.txt')
# Initialize metrics and a bool dict for which metrics should be selected.
self.which_metrics = dict(config.metrics)
self.metrics = dict([(name, dict(
[(key, []) for key in config.metrics])) for name in filenames])
# Absolute path.
self.train_source = os.path.join(self.project_path, config.train_source)
self.test_source = os.path.join(self.project_path, config.test_source)
self.test_target = os.path.join(self.project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
      print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
      print('Can\'t find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu'])
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file:
total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
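  # For instance (hypothetical key name), delete_from_metrics(['bleu']) sets every
  # which_metrics entry containing 'bleu' (e.g. 'bleu-4') to 0, so write_metrics
  # later leaves those columns out.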
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
# Compute all metrics for all files.
def run(self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words)
sources.close()
gt_responses.close()
responses.close()
# Save individual metrics to self.metrics
for key in self.objects:
for metric_name, metric in self.objects[key].metrics.items():
self.metrics[filename][metric_name] = list(metric)
self.objects[key].metrics[metric_name].clear()
self.write_metrics()
# Compute mean, std and confidence, and write all metrics to output file.
def write_metrics(self):
w | ith open(self.output_path, 'w') as output:
output.write('filename ')
output.write(' '.join([k for k, v in self.which_metrics.items() if v]))
output.write('\n')
''' The first row contains the names of the metrics, then each row
contains the name of the file and its metrics separated by spaces.
Each metric contains 3 numbers separated by ',': mean,std,confidence. '''
for filename, metrics in self.metrics.items():
output.write(filename.split('/')[-1] + ' ')
for metric_name, metric in metrics.items():
if self.which_metrics[metric_name]:
length = len(metric)
avg = sum(metric) / length
std = np.std(metric) if length > 1 else 0
confidence = self.config.t * std / math.sqrt(length)
# Write the metric to file.
m = str(avg) + ',' + str(std) + ',' + str(confidence)
output.write(m + ' ')
output.write('\n')
| identifier_body |
|
cmh_test.py | """
Perform Cochran-Mantel-Haenszel chi-squared tests on stratified contingency tables.
Each stratum is a population's contingency table; each population has a case and a control.
Each contingency table is 2x2 - case and control x REF and ALT allele counts.
ALT and REF allele counts are calculated by multiplying the ploidy of the population by ...
... either the ALT freq or (1-ALT_freq), for each of case and control - unless any of ...
... the counts are np.nan, then skip population.
TODO: allow user to select specific populations (whichpops) for get_ploidy()
"""
import os, sys, argparse, shutil, subprocess, pandas as pd, threading, ipyparallel, time
import pickle
from os import path as op
def check_pyversion() -> None:
"""Make sure python is 3.6 <= version < 3.8."""
pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))
if not pyversion >= 3.6:
text = f'''FAIL: You are using python {pyversion}. This pipeline was built with python 3.7.
FAIL: use 3.6 <= python version < 3.8
FAIL: exiting cmh_test.py'''
print(ColorText(text).fail())
exit()
if not pyversion < 3.8:
print(ColorText("FAIL: python 3.8 has issues with the ipyparallel engine returns.").fail())
print(ColorText("FAIL: use 3.6 <= python version < 3.8").fail())
print(ColorText("FAIL: exiting cmh_test.py").fail())
exit()
def pklload(path:str):
"""Load object from a .pkl file."""
pkl = pickle.load(open(path, 'rb'))
return pkl
def get_client(profile='default') -> tuple:
"""Get lview,dview from ipcluster."""
rc = ipyparallel.Client(profile=profile)
dview = rc[:]
lview = rc.load_balanced_view()
return lview, dview
def attach_data(**kwargs) -> None:
"""Load object to engines."""
import time
num_engines = len(kwargs['dview'])
print(ColorText("\nAdding data to engines ...").bold())
print(ColorText("\tWARN: Watch available mem in another terminal window: 'watch free -h'").warn())
print(ColorText("\tWARN: If available mem gets too low, kill engines and restart cmh_test.py with fewer engines: 'ipcluster stop'").warn())
for key,value in kwargs.items():
if key != 'dview':
print(f'\tLoading {key} ({value.__class__.__name__}) to {num_engines} engines')
kwargs['dview'][key] = value
time.sleep(1)
time.sleep(10)
return None
def watch_async(jobs:list, phase=None) -> None:
"""Wait until jobs are done executing, show progress bar."""
from tqdm import trange
print(ColorText(f"\nWatching {len(jobs)} {phase} jobs ...").bold())
job_idx = list(range(len(jobs)))
for i in trange(len(jobs)):
count = 0
while count < (i+1):
count = len(jobs) - len(job_idx)
for j in job_idx:
if jobs[j].ready():
count += 1
job_idx.remove(j)
pass
class ColorText():
"""
Use ANSI escape sequences to print colors +/- bold/underline to bash terminal.
"""
def __init__(self, text:str):
self.text = text
self.ending = '\033[0m'
self.colors = []
def __str__(self):
return self.text
def bold(self):
self.text = '\033[1m' + self.text + self.ending
return self
def underline(self):
self.text = '\033[4m' + self.text + self.ending
return self
def green(self):
self.text = '\033[92m' + self.text + self.ending
self.colors.append('green')
return self
def purple(self):
self.text = '\033[95m' + self.text + self.ending
self.colors.append('purple')
return self
def blue(self):
self.text = '\033[94m' + self.text + self.ending
self.colors.append('blue')
return self
def warn(self):
self.text = '\033[93m' + self.text + self.ending
self.colors.append('yellow')
return self
def fail(self):
self.text = '\033[91m' + self.text + self.ending
self.colors.append('red')
return self
pass
def askforinput(msg='Do you want to proceed?', tab='', newline='\n'):
"""Ask for input; if msg is default and input is no, exit."""
while True:
inp = input(ColorText(f"{newline}{tab}INPUT NEEDED: {msg} \n{tab}(yes | no): ").warn().__str__()).lower()
if inp in ['yes', 'no']:
if inp == 'no' and msg=='Do you want to proceed?':
print(ColorText('exiting %s' % sys.argv[0]).fail())
exit()
break
else:
print(ColorText("Please respond with 'yes' or 'no'").fail())
return inp
def wait_for_engines(engines:int, profile:str):
"""Reload engines until number matches input engines arg."""
lview = []
dview = []
count = 1
while any([len(lview) != engines, len(dview) != engines]):
if count % 30 == 0:
# if waiting too long..
# TODO: if found engines = 0, no reason to ask, if they continue it will fail
print('count = ', count)
print(ColorText("\tFAIL: Waited too long for engines.").fail())
print(ColorText("\tFAIL: Make sure that if any cluster is running, the -e arg matches the number of engines.").fail())
print(ColorText("\tFAIL: In some cases, not all expected engines can start on a busy server.").fail())
print(ColorText("\tFAIL: Therefore, it may be the case that available engines will be less than requested.").fail())
print(ColorText("\tFAIL: cmh_test.py found %s engines, with -e set to %s" % (len(lview), engines)).fail())
answer = askforinput(msg='Would you like to continue with %s engines? (choosing no will wait another 60 seconds)' % len(lview), tab='\t', newline='')
if answer == 'yes':
break
try:
lview,dview = get_client(profile=profile)
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
lview = []
dview = []
time.sleep(2)
count += 1
print('\tReturning lview,dview (%s engines) ...' % len(lview))
return lview,dview
def launch_engines(engines:int, profile:str):
"""Launch ipcluster with engines under profile."""
print(ColorText(f"\nLaunching ipcluster with {engines} engines...").bold())
def _launch(engines, profile):
subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])
# first see if a cluster has already been started
started = False
try:
print("\tLooking for existing engines ...")
lview,dview = get_client(profile=profile)
if len(lview) != engines:
lview,dview = wait_for_engines(engines, profile)
started = True
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
print("\tNo engines found ...")
# if not, launch 'em
if started is False:
print("\tLaunching engines ...")
# pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid
x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)
x.daemon=True
x.start()
lview,dview = wait_for_engines(engines, profile)
return lview,dview
def get_freq(string:str) -> float:
"""Convert VarScan FREQ to floating decimal [0,1]."""
import numpy
try:
freq = float(string.replace("%", "")) / 100
except AttributeError as e:
# if string is np.nan
freq = numpy.nan
return freq
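# Example (hypothetical inputs): get_freq('12.5%') -> 0.125, while a missing
# value (np.nan) trips the AttributeError branch and is returned as numpy.nan.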
def get_table(casedata, controldata, locus):
"""Create stratified contingency tables (each 2x2) for a given locus.
Each stratum is a population.
Contingency table has treatment (case or control) as rows, and
allele (REF or ALT) as columns.
Example table
-------------
# in python
[1] mat = np.asarray([[0, 6, 0, 5],
[3, 3, 0, 6],
[6, 0, 2, 4],
[5, 1, 6, 0],
[2, 0, 5, 0]])
[2] [np.reshape(x.tolist(), (2, 2)) for x in mat]
[out]
[array([[0, 6],
[0, 5]]),
array([[3, 3],
[0, 6]]),
array([[6, 0],
[2, 4]]),
array([[5, 1],
[6, 0]]),
array([[2, 0],
[5, 0]])]
# from R - see https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mantelhaen.test
c(0, 0, 6, 5,
...)
Response
Delay Cured Died
None 0 6
1.5h 0 5
...
"""
import numpy, pandas
tables = [] # - a list of lists
for casecol,controlcol in pairs.items():
# get ploidy of pop
pop = casecol.split('.FREQ')[0]
pop_ploidy = ploidy[pop]
# get case-control frequencies of ALT allele
case_freq = get_freq(casedata.loc[locus, casecol])
cntrl_freq = get_freq(controldata.loc[locus, controlcol])
# see if either freq is np.nan, if so, skip this pop
if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:
continue
# collate info for locus (create contingency table data)
t = []
for freq in [cntrl_freq, case_freq]:
t.extend([(1-freq)*pop_ploidy,
freq*pop_ploidy])
tables.append(t)
# return contingency tables (elements of list) for this locus stratified by population (list index)
return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]
def create_tables(*args):
"""Get stratified contingency tables for all loci in cmh_test.py input file."""
import pandas
tables = {}
for locus in args[0].index:
tables[locus] = get_table(*args, locus)
return tables
def cmh_test(*args):
"""Perform Cochran-Mantel-Haenszel chi-squared test on stratified contingency tables."""
import pandas, math
from statsmodels.stats.contingency_tables import StratifiedTable as cmh
# set up data logging
ignored = {}
# get contingency tables for pops with case and control data
tables = create_tables(*args)
# fill in a dataframe with cmh test results, one locus at a time
results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',
'lower_confidence', 'upper_confidence', 'num_pops'])
for locus,table in tables.items():
if len(table) == 0:
# if none of the populations for a locus provide a contingency table (due to missing data)
# ... then continue to the next locus.
ignored[locus] = 'there were no populations that provided contingency tables'
continue
# cmh results for stratified contingency tables (called "table" = an array of tables)
cmh_res = cmh(table)
res = cmh_res.test_null_odds(True) # statistic and p-value
odds_ratio = cmh_res.oddsratio_pooled # odds ratio
conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence
locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)
# look for fixed states across all tables
if sum([math.isnan(x) for x in conf]) > 0:
            # if the upper and lower estimates of the confidence interval are NA, ignore
# this can happen when all of the tables returned for a specific locus are fixed
# ... for either the REF or ALT. This happens rarely for loci with low MAF, where
# ... the populations that have variable case or control, do not have a frequency
# ... estimated for the other treatment (case or control) and therefore don't
# ... make it into the list of stratified tables and the remaining tables
# ... (populations) are all fixed for the REF or ALT - again, this happens for
# ... some low MAF loci and may happen if input file has few pops to stratify.
# log reason
ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'
ignored[locus] = ignored[locus] + '\t' + '\t'.join(map(str, locus_results[1:]))
continue
results.loc[len(results.index), :] = locus_results
return results, ignored
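# Minimal sketch of the underlying statsmodels call on made-up strata:
#   from statsmodels.stats.contingency_tables import StratifiedTable
#   strata = [np.array([[18, 2], [15, 5]]), np.array([[12, 8], [9, 11]])]
#   res = StratifiedTable(strata).test_null_odds(True)   # res.statistic, res.pvalue
#   StratifiedTable(strata).oddsratio_pooled             # pooled odds ratio used above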
def parallelize_cmh(casedata, controldata, lview):
"""Parallelize Cochran-Mantel-Haenszel chi-squared tests by groups of loci."""
print(ColorText('\nParallelizing CMH calls ...').bold())
import math, tqdm, pandas
jobsize = math.ceil(len(casedata.index)/len(lview))
# send jobs to engines
numjobs = (len(casedata.index)/jobsize)+1
print(ColorText("\nSending %d jobs to engines ..." % numjobs ).bold())
jobs = []
loci_to_send = []
count = 0
for locus in tqdm.tqdm(casedata.index):
count += 1
loci_to_send.append(locus)
if len(loci_to_send) == jobsize or count == len(casedata.index):
jobs.append(lview.apply_async(cmh_test, *(casedata.loc[loci_to_send, :],
controldata.loc[loci_to_send, :])))
# jobs.append(cmh_test(casedata.loc[loci_to_send, :],
# controldata.loc[loci_to_send, :])) # for testing
loci_to_send = []
# wait until jobs finish
watch_async(jobs, phase='CMH test')
    # gather output, concatenate into one dataframe
print(ColorText('\nGathering parallelized results ...').bold())
logs = dict((locus,reason) for j in jobs for (locus,reason) in j.r[1].items())
output = pandas.concat([j.r[0] for j in jobs])
# output = pandas.concat([j for j in jobs]) # for testing
return output, logs
def get_cc_pairs(casecols, controlcols, case, control):
"""For a given population, pair its case column with its control column."""
badcols = []
# global pairs # for debugging
pairs = {}
for casecol in casecols:
|
if len(badcols) > 0:
        print(ColorText('FAIL: The following case populations do not have a valid control column in the dataframe.').fail())
for cs,ct in badcols:
print(ColorText(f'FAIL: no match for {cs} named {ct} in dataframe').fail())
print(ColorText('FAIL: These case columns have not been paired and will be excluded from analyses.').fail())
askforinput()
return pairs
def get_data(df, case, control):
"""Separate input dataframe into case-only and control-only dataframes."""
# get columns for case and control
casecols = [col for col in df if case in col and 'FREQ' in col]
cntrlcols = [col for col in df if control in col and 'FREQ' in col]
# isolate data to separate dfs
casedata = df[casecols]
controldata = df[cntrlcols]
assert casedata.shape == controldata.shape
# pair up case-control pops
pairs = get_cc_pairs(casecols, cntrlcols, case, control)
return casedata, controldata, pairs
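# Pairing sketch (hypothetical column names): with case='case' and control='ctrl',
# a frequency column 'DF_p1_case.FREQ' is matched to 'DF_p1_ctrl.FREQ' via
# casecol.replace(case, control); unmatched case columns are reported and dropped.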
def get_parse():
"""
Parse input flags.
# TODO check arg descriptions, and if they're actually used.
"""
parser = argparse.ArgumentParser(description=print(mytext),
add_help=True,
formatter_class=argparse.RawTextHelpFormatter)
requiredNAMED = parser.add_argument_group('required arguments')
requiredNAMED.add_argument("-i", "--input",
required=True,
default=None,
dest="input",
type=str,
help='''/path/to/VariantsToTable_output.txt
It is assumed that there is either a 'locus' or 'unstitched_locus' column.
The 'locus' column elements are the hyphen-separated
CHROM-POS. If the 'unstitched_chrom' column is present, the code will use the
'unstitched_locus' column for SNP names, otherwise 'CHROM' and 'locus'. The
'unstitched_locus' elements are therefore the hyphen-separated
unstitched_locus-unstitched_pos. FREQ columns from VarScan are also
assumed.
''')
requiredNAMED.add_argument("-o","--outdir",
required=True,
default=None,
dest="outdir",
type=str,
help='''/path/to/cmh_test_output_dir/
File output from cmh_test.py will be saved in the outdir, with the original
name of the input file, but with the suffix "_CMH-test-results.txt"''')
requiredNAMED.add_argument("--case",
required=True,
default=None,
dest="case",
type=str,
help='''The string present in every column for pools in "case" treatments.''')
requiredNAMED.add_argument("--control",
required=True,
default=None,
dest="control",
type=str,
help='''The string present in every column for pools in "control" treatments.''')
requiredNAMED.add_argument("-p","--ploidy",
required=True,
default=None,
dest="ploidyfile",
type=str,
help='''/path/to/the/ploidy.pkl file output by the VarScan pipeline. This is a python
dictionary with key=pool_name, value=dict with key=pop, value=ploidy. The code
will prompt for pool_name if necessary.''')
requiredNAMED.add_argument("-e","--engines",
required=True,
default=None,
dest="engines",
type=int,
help="The number of ipcluster engines that will be launched.")
parser.add_argument("--ipcluster-profile",
required=False,
default='default',
dest="profile",
type=str,
help="The ipcluster profile name with which to start engines. Default: 'default'")
parser.add_argument('--keep-engines',
required=False,
action='store_true',
dest="keep_engines",
help='''Boolean: true if used, false otherwise. If you want to keep
the ipcluster engines alive, use this flag. Otherwise engines will be killed automatically.
(default: False)''')
# check flags
args = parser.parse_args()
if not op.exists(args.outdir):
print(ColorText(f"FAIL: the directory for the output file(s) does not exist.").fail())
print(ColorText(f"FAIL: please create this directory: %s" % args.outdir).fail())
print(ColorText("exiting cmh_test.py").fail())
exit()
# make sure input and ploidyfile exist
nopath = []
for x in [args.input, args.ploidyfile]: # TODO: check for $HOME or other bash vars in path
if not op.exists(x):
nopath.append(x)
# if input or ploidy file do not exist:
if len(nopath) > 0:
print(ColorText("FAIL: The following path(s) do not exist:").fail())
for f in nopath:
print(ColorText("\tFAIL: %s" % f).fail())
print(ColorText('\nexiting cmh_test.py').fail())
exit()
print('args = ', args)
return args
def choose_pool(ploidy:dict) -> dict:
"""Choose which the pool to use as a key to the ploidy dict."""
keys = list(ploidy.keys())
if len(keys) == 1:
# return the value of the dict using the only key
return ploidy[keys[0]]
print(ColorText('\nPlease choose a pool that contains the population of interest.').bold())
nums = []
for i,pool in enumerate(keys):
print('\t%s %s' % (i, pool))
nums.append(i)
while True:
inp = int(input(ColorText("\tINPUT NEEDED: Choose file by number: ").warn()).lower())
if inp in nums:
pool = keys[inp]
break
else:
print(ColorText("\tPlease respond with a number from above.").fail())
    # make sure they've chosen at least one pool
while pool is None:
print(ColorText("\tFAIL: You need to specify at least one pool. Revisiting options...").fail())
        pool = choose_pool(ploidy)  # choose_pool() takes only the ploidy dict
return ploidy[pool]
def get_ploidy(ploidyfile) -> dict:
"""Get the ploidy of the populations of interest, reduce ploidy pkl."""
print(ColorText('\nLoading ploidy information ...').bold())
# have user choose key to dict
return choose_pool(pklload(ploidyfile))
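# Shape of the ploidy pickle (hypothetical values): {'poolA': {'pop1': 40, 'pop2': 38}};
# after the user picks 'poolA', get_ploidy returns {'pop1': 40, 'pop2': 38}, and
# get_table later looks up each population's ploidy by the column prefix before '.FREQ'.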
def read_input(inputfile):
"""Read in inputfile, set index to locus names."""
print(ColorText('\nReading input file ...').bold())
# read in datatable
df = pd.read_table(inputfile, sep='\t')
# set df index
locuscol = 'unstitched_locus' if 'unstitched_locus' in df.columns else 'locus'
if locuscol not in df:
print(ColorText('\nFAIL: There must be a column for locus IDs - either "unstitched_locus" or "locus"').fail())
print(ColorText('FAIL: The column is the hyphen-separated CHROM and POS.').fail())
print(ColorText('exiting cmh_test.py').fail())
exit()
df.index = df[locuscol].tolist()
return df
def main():
# make sure it's not python3.8
check_pyversion()
# parse input arguments
args = get_parse()
# read in datatable
df = read_input(args.input)
# get ploidy for each pool to use to correct read counts for pseudoreplication
# global ploidy # for debugging
ploidy = get_ploidy(args.ploidyfile)
# isolate case/control data
casedata, controldata, pairs = get_data(df, args.case, args.control)
# get ipcluster engines
lview,dview = launch_engines(args.engines, args.profile)
# attach data and functions to engines
attach_data(ploidy=ploidy,
case=args.case,
control=args.control,
pairs=pairs,
cmh_test=cmh_test,
get_freq=get_freq,
get_table=get_table,
create_tables=create_tables,
dview=dview)
# run cmh tests in parallel
output,logs = parallelize_cmh(casedata, controldata, lview)
# write to outfile
outfile = op.join(args.outdir, op.basename(args.input).split(".")[0] + '_CMH-test-results.txt')
print(ColorText(f'\nWriting all results to: ').bold().__str__()+ f'{outfile} ...')
output.to_csv(outfile,
sep='\t', index=False)
# write logs
logfile = outfile.replace(".txt", ".log")
print(ColorText(f'\nWriting logs to: ').bold().__str__()+ f'{logfile} ...')
if len(logs) > 0:
with open(logfile, 'w') as o:
o.write('locus\treason_for_exclusion\todds_ratio\tp-value\tlower_confidence\tupper_confidence\tnum_pops\n')
lines = []
for locus,reason in logs.items():
lines.append(f'{locus}\t{reason}')
o.write("%s" % '\n'.join(lines))
# kill ipcluster to avoid mem problems
if args.keep_engines is False:
print(ColorText("\nStopping ipcluster ...").bold())
subprocess.call([shutil.which('ipcluster'), 'stop'])
print(ColorText('\nDONE!!\n').green().bold())
pass
if __name__ == '__main__':
mytext = ColorText('''
*****************************************************************************
CoAdapTree's
______ __ ___ __ __ ________ _
| ____| | \\ / | | | | | |__ __| ____ _____ __| |__
| | | \\/ | | |__| | | | / __ \\ | ____| |__ __|
| | | |\\ /| | | __ | | | | /__\\_| |___ | |
| |____ | | \\/ | | | | | | | | | \____ ___| | | |
|______| |__| |__| |__| |__| |_| \\____/ |_____| |_|
Cochran-Mantel-Haenszel chi-squared test
*****************************************************************************''').green().bold().__str__()
main()
| controlcol = casecol.replace(case, control)
if not controlcol in controlcols:
badcols.append((casecol, controlcol))
continue
pairs[casecol] = controlcol | conditional_block |
cmh_test.py | """
Perform Cochran-Mantel-Haenszel chi-squared tests on stratified contingency tables.
Each stratum is a population's contingency table; each population has a case and a control.
Each contingency table is 2x2 - case and control x REF and ALT allele counts.
ALT and REF allele counts are calculated by multiplying the ploidy of the population by ...
... either the ALT freq or (1-ALT_freq), for each of case and control - unless any of ...
... the counts are np.nan, then skip population.
TODO: allow user to select specific populations (whichpops) for get_ploidy()
"""
import os, sys, argparse, shutil, subprocess, pandas as pd, threading, ipyparallel, time
import pickle
from os import path as op
def check_pyversion() -> None:
"""Make sure python is 3.6 <= version < 3.8."""
pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))
if not pyversion >= 3.6:
text = f'''FAIL: You are using python {pyversion}. This pipeline was built with python 3.7.
FAIL: use 3.6 <= python version < 3.8
FAIL: exiting cmh_test.py'''
print(ColorText(text).fail())
exit()
if not pyversion < 3.8:
print(ColorText("FAIL: python 3.8 has issues with the ipyparallel engine returns.").fail())
print(ColorText("FAIL: use 3.6 <= python version < 3.8").fail())
print(ColorText("FAIL: exiting cmh_test.py").fail())
exit()
def pklload(path:str):
"""Load object from a .pkl file."""
pkl = pickle.load(open(path, 'rb'))
return pkl
def get_client(profile='default') -> tuple:
"""Get lview,dview from ipcluster."""
rc = ipyparallel.Client(profile=profile)
dview = rc[:]
lview = rc.load_balanced_view()
return lview, dview
def attach_data(**kwargs) -> None:
"""Load object to engines."""
import time
num_engines = len(kwargs['dview'])
print(ColorText("\nAdding data to engines ...").bold())
print(ColorText("\tWARN: Watch available mem in another terminal window: 'watch free -h'").warn())
print(ColorText("\tWARN: If available mem gets too low, kill engines and restart cmh_test.py with fewer engines: 'ipcluster stop'").warn())
for key,value in kwargs.items():
if key != 'dview':
print(f'\tLoading {key} ({value.__class__.__name__}) to {num_engines} engines')
kwargs['dview'][key] = value
time.sleep(1)
time.sleep(10)
return None
def watch_async(jobs:list, phase=None) -> None:
"""Wait until jobs are done executing, show progress bar."""
from tqdm import trange
print(ColorText(f"\nWatching {len(jobs)} {phase} jobs ...").bold())
job_idx = list(range(len(jobs)))
for i in trange(len(jobs)):
count = 0
while count < (i+1):
count = len(jobs) - len(job_idx)
for j in job_idx:
if jobs[j].ready():
count += 1
job_idx.remove(j)
pass
class ColorText():
"""
Use ANSI escape sequences to print colors +/- bold/underline to bash terminal.
"""
def __init__(self, text:str):
|
def __str__(self):
return self.text
def bold(self):
self.text = '\033[1m' + self.text + self.ending
return self
def underline(self):
self.text = '\033[4m' + self.text + self.ending
return self
def green(self):
self.text = '\033[92m' + self.text + self.ending
self.colors.append('green')
return self
def purple(self):
self.text = '\033[95m' + self.text + self.ending
self.colors.append('purple')
return self
def blue(self):
self.text = '\033[94m' + self.text + self.ending
self.colors.append('blue')
return self
def warn(self):
self.text = '\033[93m' + self.text + self.ending
self.colors.append('yellow')
return self
def fail(self):
self.text = '\033[91m' + self.text + self.ending
self.colors.append('red')
return self
pass
def askforinput(msg='Do you want to proceed?', tab='', newline='\n'):
"""Ask for input; if msg is default and input is no, exit."""
while True:
inp = input(ColorText(f"{newline}{tab}INPUT NEEDED: {msg} \n{tab}(yes | no): ").warn().__str__()).lower()
if inp in ['yes', 'no']:
if inp == 'no' and msg=='Do you want to proceed?':
print(ColorText('exiting %s' % sys.argv[0]).fail())
exit()
break
else:
print(ColorText("Please respond with 'yes' or 'no'").fail())
return inp
def wait_for_engines(engines:int, profile:str):
"""Reload engines until number matches input engines arg."""
lview = []
dview = []
count = 1
while any([len(lview) != engines, len(dview) != engines]):
if count % 30 == 0:
# if waiting too long..
# TODO: if found engines = 0, no reason to ask, if they continue it will fail
print('count = ', count)
print(ColorText("\tFAIL: Waited too long for engines.").fail())
print(ColorText("\tFAIL: Make sure that if any cluster is running, the -e arg matches the number of engines.").fail())
print(ColorText("\tFAIL: In some cases, not all expected engines can start on a busy server.").fail())
print(ColorText("\tFAIL: Therefore, it may be the case that available engines will be less than requested.").fail())
print(ColorText("\tFAIL: cmh_test.py found %s engines, with -e set to %s" % (len(lview), engines)).fail())
answer = askforinput(msg='Would you like to continue with %s engines? (choosing no will wait another 60 seconds)' % len(lview), tab='\t', newline='')
if answer == 'yes':
break
try:
lview,dview = get_client(profile=profile)
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
lview = []
dview = []
time.sleep(2)
count += 1
print('\tReturning lview,dview (%s engines) ...' % len(lview))
return lview,dview
def launch_engines(engines:int, profile:str):
"""Launch ipcluster with engines under profile."""
print(ColorText(f"\nLaunching ipcluster with {engines} engines...").bold())
def _launch(engines, profile):
subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])
# first see if a cluster has already been started
started = False
try:
print("\tLooking for existing engines ...")
lview,dview = get_client(profile=profile)
if len(lview) != engines:
lview,dview = wait_for_engines(engines, profile)
started = True
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
print("\tNo engines found ...")
# if not, launch 'em
if started is False:
print("\tLaunching engines ...")
# pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid
x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)
x.daemon=True
x.start()
lview,dview = wait_for_engines(engines, profile)
return lview,dview
def get_freq(string:str) -> float:
"""Convert VarScan FREQ to floating decimal [0,1]."""
import numpy
try:
freq = float(string.replace("%", "")) / 100
except AttributeError as e:
# if string is np.nan
freq = numpy.nan
return freq
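# Illustrative note (not part of the pipeline logic): VarScan reports FREQ as a
# percentage string, so get_freq("47.85%") returns 0.4785, while a missing value
# (np.nan, which has no .replace method) falls into the AttributeError branch and
# returns nan.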
def get_table(casedata, controldata, locus):
"""Create stratified contingency tables (each 2x2) for a given locus.
Each stratum is a population.
Contingency table has treatment (case or control) as rows, and
allele (REF or ALT) as columns.
Example table
-------------
# in python
[1] mat = np.asarray([[0, 6, 0, 5],
[3, 3, 0, 6],
[6, 0, 2, 4],
[5, 1, 6, 0],
[2, 0, 5, 0]])
[2] [np.reshape(x.tolist(), (2, 2)) for x in mat]
[out]
[array([[0, 6],
[0, 5]]),
array([[3, 3],
[0, 6]]),
array([[6, 0],
[2, 4]]),
array([[5, 1],
[6, 0]]),
array([[2, 0],
[5, 0]])]
# from R - see https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mantelhaen.test
c(0, 0, 6, 5,
...)
Response
Delay Cured Died
None 0 6
1.5h 0 5
...
"""
import numpy, pandas
tables = [] # - a list of lists
for casecol,controlcol in pairs.items():
# get ploidy of pop
pop = casecol.split('.FREQ')[0]
pop_ploidy = ploidy[pop]
# get case-control frequencies of ALT allele
case_freq = get_freq(casedata.loc[locus, casecol])
cntrl_freq = get_freq(controldata.loc[locus, controlcol])
# see if either freq is np.nan, if so, skip this pop
if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:
continue
# collate info for locus (create contingency table data)
t = []
for freq in [cntrl_freq, case_freq]:
t.extend([(1-freq)*pop_ploidy,
freq*pop_ploidy])
tables.append(t)
# return contingency tables (elements of list) for this locus stratified by population (list index)
return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]
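# Minimal sketch (hypothetical helper, not called anywhere in this script): how a
# single population's 2x2 stratum is assembled by get_table() above. With a pool
# ploidy of 40 (e.g. 20 diploids), a control ALT freq of 0.25 and a case ALT freq
# of 0.50, rows are control then case and columns are REF then ALT counts.
def _example_stratum(pop_ploidy=40, cntrl_freq=0.25, case_freq=0.50):
    import numpy
    t = []
    for freq in [cntrl_freq, case_freq]:
        t.extend([(1 - freq) * pop_ploidy,  # REF count = ploidy * (1 - ALT freq)
                  freq * pop_ploidy])       # ALT count = ploidy * ALT freq
    return numpy.reshape(t, (2, 2))         # array([[30., 10.], [20., 20.]])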
def create_tables(*args):
"""Get stratified contingency tables for all loci in cmh_test.py input file."""
import pandas
tables = {}
for locus in args[0].index:
tables[locus] = get_table(*args, locus)
return tables
def cmh_test(*args):
"""Perform Cochran-Mantel-Haenszel chi-squared test on stratified contingency tables."""
import pandas, math
from statsmodels.stats.contingency_tables import StratifiedTable as cmh
# set up data logging
ignored = {}
# get contingency tables for pops with case and control data
tables = create_tables(*args)
# fill in a dataframe with cmh test results, one locus at a time
results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',
'lower_confidence', 'upper_confidence', 'num_pops'])
for locus,table in tables.items():
if len(table) == 0:
# if none of the populations for a locus provide a contingency table (due to missing data)
# ... then continue to the next locus.
ignored[locus] = 'there were no populations that provided contingency tables'
continue
# cmh results for stratified contingency tables (called "table" = an array of tables)
cmh_res = cmh(table)
res = cmh_res.test_null_odds(True) # statistic and p-value
odds_ratio = cmh_res.oddsratio_pooled # odds ratio
conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence
locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)
# look for fixed states across all tables
if sum([math.isnan(x) for x in conf]) > 0:
# if the upper and lower estimate of the confidence interval are NA, ignore
# this can happen when all of the tables returned for a specific locus are fixed
# ... for either the REF or ALT. This happens rarely for loci with low MAF, where
# ... the populations that have variable case or control, do not have a frequency
# ... estimated for the other treatment (case or control) and therefore don't
# ... make it into the list of stratified tables and the remaining tables
# ... (populations) are all fixed for the REF or ALT - again, this happens for
# ... some low MAF loci and may happen if input file has few pops to stratify.
# log reason
ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'
ignored[locus] = ignored[locus] + '\t' + '\t'.join(map(str, locus_results[1:]))
continue
results.loc[len(results.index), :] = locus_results
return results, ignored
def parallelize_cmh(casedata, controldata, lview):
"""Parallelize Cochran-Mantel-Haenszel chi-squared tests by groups of loci."""
print(ColorText('\nParallelizing CMH calls ...').bold())
import math, tqdm, pandas
jobsize = math.ceil(len(casedata.index)/len(lview))
# send jobs to engines
numjobs = (len(casedata.index)/jobsize)+1
print(ColorText("\nSending %d jobs to engines ..." % numjobs ).bold())
jobs = []
loci_to_send = []
count = 0
for locus in tqdm.tqdm(casedata.index):
count += 1
loci_to_send.append(locus)
if len(loci_to_send) == jobsize or count == len(casedata.index):
jobs.append(lview.apply_async(cmh_test, *(casedata.loc[loci_to_send, :],
controldata.loc[loci_to_send, :])))
# jobs.append(cmh_test(casedata.loc[loci_to_send, :],
# controldata.loc[loci_to_send, :])) # for testing
loci_to_send = []
# wait until jobs finish
watch_async(jobs, phase='CMH test')
# gather output, concatenate into one dataframe
print(ColorText('\nGathering parallelized results ...').bold())
logs = dict((locus,reason) for j in jobs for (locus,reason) in j.r[1].items())
output = pandas.concat([j.r[0] for j in jobs])
# output = pandas.concat([j for j in jobs]) # for testing
return output, logs
def get_cc_pairs(casecols, controlcols, case, control):
"""For a given population, pair its case column with its control column."""
badcols = []
# global pairs # for debugging
pairs = {}
for casecol in casecols:
controlcol = casecol.replace(case, control)
if not controlcol in controlcols:
badcols.append((casecol, controlcol))
continue
pairs[casecol] = controlcol
if len(badcols) > 0:
print(ColorText('FAIL: The following case populations do not have a valid control column in the dataframe.').fail())
for cs,ct in badcols:
print(ColorText(f'FAIL: no match for {cs} named {ct} in dataframe').fail())
print(ColorText('FAIL: These case columns have not been paired and will be excluded from analyses.').fail())
askforinput()
return pairs
def get_data(df, case, control):
"""Separate input dataframe into case-only and control-only dataframes."""
# get columns for case and control
casecols = [col for col in df if case in col and 'FREQ' in col]
cntrlcols = [col for col in df if control in col and 'FREQ' in col]
# isolate data to separate dfs
casedata = df[casecols]
controldata = df[cntrlcols]
assert casedata.shape == controldata.shape
# pair up case-control pops
pairs = get_cc_pairs(casecols, cntrlcols, case, control)
return casedata, controldata, pairs
def get_parse():
"""
Parse input flags.
# TODO check arg descriptions, and if they're actually used.
"""
parser = argparse.ArgumentParser(description=print(mytext),
add_help=True,
formatter_class=argparse.RawTextHelpFormatter)
requiredNAMED = parser.add_argument_group('required arguments')
requiredNAMED.add_argument("-i", "--input",
required=True,
default=None,
dest="input",
type=str,
help='''/path/to/VariantsToTable_output.txt
It is assumed that there is either a 'locus' or 'unstitched_locus' column.
The 'locus' column elements are the hyphen-separated
CHROM-POS. If the 'unstitched_chrom' column is present, the code will use the
'unstitched_locus' column for SNP names, otherwise 'CHROM' and 'locus'. The
'unstitched_locus' elements are therefore the hyphen-separated
unstitched_chrom-unstitched_pos. FREQ columns from VarScan are also
assumed.
''')
requiredNAMED.add_argument("-o","--outdir",
required=True,
default=None,
dest="outdir",
type=str,
help='''/path/to/cmh_test_output_dir/
File output from cmh_test.py will be saved in the outdir, with the original
name of the input file, but with the suffix "_CMH-test-results.txt"''')
requiredNAMED.add_argument("--case",
required=True,
default=None,
dest="case",
type=str,
help='''The string present in every column for pools in "case" treatments.''')
requiredNAMED.add_argument("--control",
required=True,
default=None,
dest="control",
type=str,
help='''The string present in every column for pools in "control" treatments.''')
requiredNAMED.add_argument("-p","--ploidy",
required=True,
default=None,
dest="ploidyfile",
type=str,
help='''/path/to/the/ploidy.pkl file output by the VarScan pipeline. This is a python
dictionary with key=pool_name, value=dict with key=pop, value=ploidy. The code
will prompt for pool_name if necessary.''')
requiredNAMED.add_argument("-e","--engines",
required=True,
default=None,
dest="engines",
type=int,
help="The number of ipcluster engines that will be launched.")
parser.add_argument("--ipcluster-profile",
required=False,
default='default',
dest="profile",
type=str,
help="The ipcluster profile name with which to start engines. Default: 'default'")
parser.add_argument('--keep-engines',
required=False,
action='store_true',
dest="keep_engines",
help='''Boolean: true if used, false otherwise. If you want to keep
the ipcluster engines alive, use this flag. Otherwise engines will be killed automatically.
(default: False)''')
# check flags
args = parser.parse_args()
if not op.exists(args.outdir):
print(ColorText(f"FAIL: the directory for the output file(s) does not exist.").fail())
print(ColorText(f"FAIL: please create this directory: %s" % args.outdir).fail())
print(ColorText("exiting cmh_test.py").fail())
exit()
# make sure input and ploidyfile exist
nopath = []
for x in [args.input, args.ploidyfile]: # TODO: check for $HOME or other bash vars in path
if not op.exists(x):
nopath.append(x)
# if input or ploidy file do not exist:
if len(nopath) > 0:
print(ColorText("FAIL: The following path(s) do not exist:").fail())
for f in nopath:
print(ColorText("\tFAIL: %s" % f).fail())
print(ColorText('\nexiting cmh_test.py').fail())
exit()
print('args = ', args)
return args
def choose_pool(ploidy:dict) -> dict:
"""Choose which the pool to use as a key to the ploidy dict."""
keys = list(ploidy.keys())
if len(keys) == 1:
# return the value of the dict using the only key
return ploidy[keys[0]]
print(ColorText('\nPlease choose a pool that contains the population of interest.').bold())
nums = []
for i,pool in enumerate(keys):
print('\t%s %s' % (i, pool))
nums.append(i)
while True:
inp = int(input(ColorText("\tINPUT NEEDED: Choose file by number: ").warn()).lower())
if inp in nums:
pool = keys[inp]
break
else:
print(ColorText("\tPlease respond with a number from above.").fail())
# make sure they've chosen at least one pool
while pool is None:
print(ColorText("\tFAIL: You need to specify at least one pool. Revisiting options...").fail())
pool = choose_pool(ploidy)
return ploidy[pool]
def get_ploidy(ploidyfile) -> dict:
"""Get the ploidy of the populations of interest, reduce ploidy pkl."""
print(ColorText('\nLoading ploidy information ...').bold())
# have user choose key to dict
return choose_pool(pklload(ploidyfile))
def read_input(inputfile):
"""Read in inputfile, set index to locus names."""
print(ColorText('\nReading input file ...').bold())
# read in datatable
df = pd.read_table(inputfile, sep='\t')
# set df index
locuscol = 'unstitched_locus' if 'unstitched_locus' in df.columns else 'locus'
if locuscol not in df:
print(ColorText('\nFAIL: There must be a column for locus IDs - either "unstitched_locus" or "locus"').fail())
print(ColorText('FAIL: The column is the hyphen-separated CHROM and POS.').fail())
print(ColorText('exiting cmh_test.py').fail())
exit()
df.index = df[locuscol].tolist()
return df
def main():
# make sure it's not python3.8
check_pyversion()
# parse input arguments
args = get_parse()
# read in datatable
df = read_input(args.input)
# get ploidy for each pool to use to correct read counts for pseudoreplication
# global ploidy # for debugging
ploidy = get_ploidy(args.ploidyfile)
# isolate case/control data
casedata, controldata, pairs = get_data(df, args.case, args.control)
# get ipcluster engines
lview,dview = launch_engines(args.engines, args.profile)
# attach data and functions to engines
attach_data(ploidy=ploidy,
case=args.case,
control=args.control,
pairs=pairs,
cmh_test=cmh_test,
get_freq=get_freq,
get_table=get_table,
create_tables=create_tables,
dview=dview)
# run cmh tests in parallel
output,logs = parallelize_cmh(casedata, controldata, lview)
# write to outfile
outfile = op.join(args.outdir, op.basename(args.input).split(".")[0] + '_CMH-test-results.txt')
print(ColorText(f'\nWriting all results to: ').bold().__str__()+ f'{outfile} ...')
output.to_csv(outfile,
sep='\t', index=False)
# write logs
logfile = outfile.replace(".txt", ".log")
print(ColorText(f'\nWriting logs to: ').bold().__str__()+ f'{logfile} ...')
if len(logs) > 0:
with open(logfile, 'w') as o:
o.write('locus\treason_for_exclusion\todds_ratio\tp-value\tlower_confidence\tupper_confidence\tnum_pops\n')
lines = []
for locus,reason in logs.items():
lines.append(f'{locus}\t{reason}')
o.write("%s" % '\n'.join(lines))
# kill ipcluster to avoid mem problems
if args.keep_engines is False:
print(ColorText("\nStopping ipcluster ...").bold())
subprocess.call([shutil.which('ipcluster'), 'stop'])
print(ColorText('\nDONE!!\n').green().bold())
pass
if __name__ == '__main__':
mytext = ColorText('''
*****************************************************************************
CoAdapTree's
______ __ ___ __ __ ________ _
| ____| | \\ / | | | | | |__ __| ____ _____ __| |__
| | | \\/ | | |__| | | | / __ \\ | ____| |__ __|
| | | |\\ /| | | __ | | | | /__\\_| |___ | |
| |____ | | \\/ | | | | | | | | | \____ ___| | | |
|______| |__| |__| |__| |__| |_| \\____/ |_____| |_|
Cochran-Mantel-Haenszel chi-squared test
*****************************************************************************''').green().bold().__str__()
main()
| self.text = text
self.ending = '\033[0m'
self.colors = [] | identifier_body |
cmh_test.py | """
Perform Cochran-Mantel-Haenszel chi-squared tests on stratified contingency tables.
Each stratum is a population's contingency table; each population has a case and a control.
Each contingency table is 2x2 - case and control x REF and ALT allele counts.
ALT and REF allele counts are calculated by multiplying the ploidy of the population by ...
... either the ALT freq or (1-ALT_freq), for each of case and control - unless any of ...
... the counts are np.nan, then skip population.
TODO: allow user to select specific populations (whichpops) for get_ploidy()
"""
import os, sys, argparse, shutil, subprocess, pandas as pd, threading, ipyparallel, time
import pickle
from os import path as op
def check_pyversion() -> None:
"""Make sure python is 3.6 <= version < 3.8."""
pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))
if not pyversion >= 3.6:
text = f'''FAIL: You are using python {pyversion}. This pipeline was built with python 3.7.
FAIL: use 3.6 <= python version < 3.8
FAIL: exiting cmh_test.py'''
print(ColorText(text).fail())
exit()
if not pyversion < 3.8:
print(ColorText("FAIL: python 3.8 has issues with the ipyparallel engine returns.").fail())
print(ColorText("FAIL: use 3.6 <= python version < 3.8").fail())
print(ColorText("FAIL: exiting cmh_test.py").fail())
exit()
def pklload(path:str):
"""Load object from a .pkl file."""
pkl = pickle.load(open(path, 'rb'))
return pkl
def get_client(profile='default') -> tuple:
"""Get lview,dview from ipcluster."""
rc = ipyparallel.Client(profile=profile)
dview = rc[:]
lview = rc.load_balanced_view()
return lview, dview
def attach_data(**kwargs) -> None:
"""Load object to engines."""
import time
num_engines = len(kwargs['dview'])
print(ColorText("\nAdding data to engines ...").bold())
print(ColorText("\tWARN: Watch available mem in another terminal window: 'watch free -h'").warn())
print(ColorText("\tWARN: If available mem gets too low, kill engines and restart cmh_test.py with fewer engines: 'ipcluster stop'").warn())
for key,value in kwargs.items():
if key != 'dview':
print(f'\tLoading {key} ({value.__class__.__name__}) to {num_engines} engines')
kwargs['dview'][key] = value
time.sleep(1)
time.sleep(10)
return None
def watch_async(jobs:list, phase=None) -> None:
"""Wait until jobs are done executing, show progress bar."""
from tqdm import trange
print(ColorText(f"\nWatching {len(jobs)} {phase} jobs ...").bold())
job_idx = list(range(len(jobs)))
for i in trange(len(jobs)):
count = 0
while count < (i+1):
count = len(jobs) - len(job_idx)
for j in job_idx:
if jobs[j].ready():
count += 1
job_idx.remove(j)
pass
class ColorText():
"""
Use ANSI escape sequences to print colors +/- bold/underline to bash terminal.
"""
def __init__(self, text:str):
self.text = text
self.ending = '\033[0m'
self.colors = []
def __str__(self):
return self.text
def bold(self):
self.text = '\033[1m' + self.text + self.ending
return self
def underline(self):
self.text = '\033[4m' + self.text + self.ending
return self
def green(self):
self.text = '\033[92m' + self.text + self.ending
self.colors.append('green')
return self
def purple(self):
self.text = '\033[95m' + self.text + self.ending
self.colors.append('purple')
return self
def blue(self):
self.text = '\033[94m' + self.text + self.ending
self.colors.append('blue')
return self
def warn(self):
self.text = '\033[93m' + self.text + self.ending
self.colors.append('yellow')
return self
def fail(self):
self.text = '\033[91m' + self.text + self.ending
self.colors.append('red')
return self
pass
def askforinput(msg='Do you want to proceed?', tab='', newline='\n'):
"""Ask for input; if msg is default and input is no, exit."""
while True:
inp = input(ColorText(f"{newline}{tab}INPUT NEEDED: {msg} \n{tab}(yes | no): ").warn().__str__()).lower()
if inp in ['yes', 'no']:
if inp == 'no' and msg=='Do you want to proceed?':
print(ColorText('exiting %s' % sys.argv[0]).fail())
exit()
break
else:
print(ColorText("Please respond with 'yes' or 'no'").fail())
return inp
def | (engines:int, profile:str):
"""Reload engines until number matches input engines arg."""
lview = []
dview = []
count = 1
while any([len(lview) != engines, len(dview) != engines]):
if count % 30 == 0:
# if waiting too long..
# TODO: if found engines = 0, no reason to ask, if they continue it will fail
print('count = ', count)
print(ColorText("\tFAIL: Waited too long for engines.").fail())
print(ColorText("\tFAIL: Make sure that if any cluster is running, the -e arg matches the number of engines.").fail())
print(ColorText("\tFAIL: In some cases, not all expected engines can start on a busy server.").fail())
print(ColorText("\tFAIL: Therefore, it may be the case that available engines will be less than requested.").fail())
print(ColorText("\tFAIL: cmh_test.py found %s engines, with -e set to %s" % (len(lview), engines)).fail())
answer = askforinput(msg='Would you like to continue with %s engines? (choosing no will wait another 60 seconds)' % len(lview), tab='\t', newline='')
if answer == 'yes':
break
try:
lview,dview = get_client(profile=profile)
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
lview = []
dview = []
time.sleep(2)
count += 1
print('\tReturning lview,dview (%s engines) ...' % len(lview))
return lview,dview
def launch_engines(engines:int, profile:str):
"""Launch ipcluster with engines under profile."""
print(ColorText(f"\nLaunching ipcluster with {engines} engines...").bold())
def _launch(engines, profile):
subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])
# first see if a cluster has already been started
started = False
try:
print("\tLooking for existing engines ...")
lview,dview = get_client(profile=profile)
if len(lview) != engines:
lview,dview = wait_for_engines(engines, profile)
started = True
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
print("\tNo engines found ...")
# if not, launch 'em
if started is False:
print("\tLaunching engines ...")
# pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid
x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)
x.daemon=True
x.start()
lview,dview = wait_for_engines(engines, profile)
return lview,dview
def get_freq(string:str) -> float:
"""Convert VarScan FREQ to floating decimal [0,1]."""
import numpy
try:
freq = float(string.replace("%", "")) / 100
except AttributeError as e:
# if string is np.nan
freq = numpy.nan
return freq
def get_table(casedata, controldata, locus):
"""Create stratified contingency tables (each 2x2) for a given locus.
Each stratum is a population.
Contingency table has treatment (case or control) as rows, and
allele (REF or ALT) as columns.
Example table
-------------
# in python
[1] mat = np.asarray([[0, 6, 0, 5],
[3, 3, 0, 6],
[6, 0, 2, 4],
[5, 1, 6, 0],
[2, 0, 5, 0]])
[2] [np.reshape(x.tolist(), (2, 2)) for x in mat]
[out]
[array([[0, 6],
[0, 5]]),
array([[3, 3],
[0, 6]]),
array([[6, 0],
[2, 4]]),
array([[5, 1],
[6, 0]]),
array([[2, 0],
[5, 0]])]
# from R - see https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mantelhaen.test
c(0, 0, 6, 5,
...)
Response
Delay Cured Died
None 0 6
1.5h 0 5
...
"""
import numpy, pandas
tables = [] # - a list of lists
for casecol,controlcol in pairs.items():
# get ploidy of pop
pop = casecol.split('.FREQ')[0]
pop_ploidy = ploidy[pop]
# get case-control frequencies of ALT allele
case_freq = get_freq(casedata.loc[locus, casecol])
cntrl_freq = get_freq(controldata.loc[locus, controlcol])
# see if either freq is np.nan, if so, skip this pop
if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:
continue
# collate info for locus (create contingency table data)
t = []
for freq in [cntrl_freq, case_freq]:
t.extend([(1-freq)*pop_ploidy,
freq*pop_ploidy])
tables.append(t)
# return contingency tables (elements of list) for this locus stratified by population (list index)
return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]
def create_tables(*args):
"""Get stratified contingency tables for all loci in cmh_test.py input file."""
import pandas
tables = {}
for locus in args[0].index:
tables[locus] = get_table(*args, locus)
return tables
def cmh_test(*args):
"""Perform Cochran-Mantel-Haenszel chi-squared test on stratified contingency tables."""
import pandas, math
from statsmodels.stats.contingency_tables import StratifiedTable as cmh
# set up data logging
ignored = {}
# get contingency tables for pops with case and control data
tables = create_tables(*args)
# fill in a dataframe with cmh test results, one locus at a time
results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',
'lower_confidence', 'upper_confidence', 'num_pops'])
for locus,table in tables.items():
if len(table) == 0:
# if none of the populations for a locus provide a contingency table (due to missing data)
# ... then continue to the next locus.
ignored[locus] = 'there were no populations that provided contingency tables'
continue
# cmh results for stratified contingency tables (called "table" = an array of tables)
cmh_res = cmh(table)
res = cmh_res.test_null_odds(True) # statistic and p-value
odds_ratio = cmh_res.oddsratio_pooled # odds ratio
conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence
locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)
# look for fixed states across all tables
if sum([math.isnan(x) for x in conf]) > 0:
# if the upper and lower estimate of the confidence interval are NA, ignore
# this can happen when all of the tables returned for a specific locus are fixed
# ... for either the REF or ALT. This happens rarely for loci with low MAF, where
# ... the populations that have variable case or control, do not have a frequency
# ... estimated for the other treatment (case or control) and therefore don't
# ... make it into the list of stratified tables and the remaining tables
# ... (populations) are all fixed for the REF or ALT - again, this happens for
# ... some low MAF loci and may happen if input file has few pops to stratify.
# log reason
ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'
ignored[locus] = ignored[locus] + '\t' + '\t'.join(map(str, locus_results[1:]))
continue
results.loc[len(results.index), :] = locus_results
return results, ignored
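# Minimal sketch (hypothetical counts): the statsmodels StratifiedTable calls used
# in cmh_test() above, shown on two made-up population strata. Only attributes
# already used by this script are touched.
def _example_cmh_call():
    import numpy
    from statsmodels.stats.contingency_tables import StratifiedTable
    strata = [numpy.array([[30, 10], [20, 20]]),  # pop 1: control/case x REF/ALT
              numpy.array([[25, 15], [18, 22]])]  # pop 2
    st = StratifiedTable(strata)
    res = st.test_null_odds(True)                 # CMH test of pooled OR = 1; has .statistic, .pvalue
    return st.oddsratio_pooled, res.pvalue, st.oddsratio_pooled_confint()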
def parallelize_cmh(casedata, controldata, lview):
"""Parallelize Cochran-Mantel-Haenszel chi-squared tests by groups of loci."""
print(ColorText('\nParallelizing CMH calls ...').bold())
import math, tqdm, pandas
jobsize = math.ceil(len(casedata.index)/len(lview))
# send jobs to engines
numjobs = (len(casedata.index)/jobsize)+1
print(ColorText("\nSending %d jobs to engines ..." % numjobs ).bold())
jobs = []
loci_to_send = []
count = 0
for locus in tqdm.tqdm(casedata.index):
count += 1
loci_to_send.append(locus)
if len(loci_to_send) == jobsize or count == len(casedata.index):
jobs.append(lview.apply_async(cmh_test, *(casedata.loc[loci_to_send, :],
controldata.loc[loci_to_send, :])))
# jobs.append(cmh_test(casedata.loc[loci_to_send, :],
# controldata.loc[loci_to_send, :])) # for testing
loci_to_send = []
# wait until jobs finish
watch_async(jobs, phase='CMH test')
# gather output, concatenate into one dataframe
print(ColorText('\nGathering parallelized results ...').bold())
logs = dict((locus,reason) for j in jobs for (locus,reason) in j.r[1].items())
output = pandas.concat([j.r[0] for j in jobs])
# output = pandas.concat([j for j in jobs]) # for testing
return output, logs
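# Worked example of the chunking above (illustrative numbers): with 1,000,000 loci
# and 40 engines, jobsize = ceil(1,000,000 / 40) = 25,000 loci per chunk, and one
# asynchronous cmh_test() call is queued per chunk on the load-balanced view.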
def get_cc_pairs(casecols, controlcols, case, control):
"""For a given population, pair its case column with its control column."""
badcols = []
# global pairs # for debugging
pairs = {}
for casecol in casecols:
controlcol = casecol.replace(case, control)
if not controlcol in controlcols:
badcols.append((casecol, controlcol))
continue
pairs[casecol] = controlcol
if len(badcols) > 0:
print(ColorText('FAIL: The following case populations do not have a valid control column in the dataframe.').fail())
for cs,ct in badcols:
print(ColorText(f'FAIL: no match for {cs} named {ct} in dataframe').fail())
print(ColorText('FAIL: These case columns have not been paired and will be excluded from analyses.').fail())
askforinput()
return pairs
def get_data(df, case, control):
"""Separate input dataframe into case-only and control-only dataframes."""
# get columns for case and control
casecols = [col for col in df if case in col and 'FREQ' in col]
cntrlcols = [col for col in df if control in col and 'FREQ' in col]
# isolate data to separate dfs
casedata = df[casecols]
controldata = df[cntrlcols]
assert casedata.shape == controldata.shape
# pair up case-control pops
pairs = get_cc_pairs(casecols, cntrlcols, case, control)
return casedata, controldata, pairs
def get_parse():
"""
Parse input flags.
# TODO check arg descriptions, and if they're actually used.
"""
parser = argparse.ArgumentParser(description=print(mytext),
add_help=True,
formatter_class=argparse.RawTextHelpFormatter)
requiredNAMED = parser.add_argument_group('required arguments')
requiredNAMED.add_argument("-i", "--input",
required=True,
default=None,
dest="input",
type=str,
help='''/path/to/VariantsToTable_output.txt
It is assumed that there is either a 'locus' or 'unstitched_locus' column.
The 'locus' column elements are the hyphen-separated
CHROM-POS. If the 'unstitched_chrom' column is present, the code will use the
'unstitched_locus' column for SNP names, otherwise 'CHROM' and 'locus'. The
'unstitched_locus' elements are therefore the hyphen-separated
unstitched_chrom-unstitched_pos. FREQ columns from VarScan are also
assumed.
''')
requiredNAMED.add_argument("-o","--outdir",
required=True,
default=None,
dest="outdir",
type=str,
help='''/path/to/cmh_test_output_dir/
File output from cmh_test.py will be saved in the outdir, with the original
name of the input file, but with the suffix "_CMH-test-results.txt"''')
requiredNAMED.add_argument("--case",
required=True,
default=None,
dest="case",
type=str,
help='''The string present in every column for pools in "case" treatments.''')
requiredNAMED.add_argument("--control",
required=True,
default=None,
dest="control",
type=str,
help='''The string present in every column for pools in "control" treatments.''')
requiredNAMED.add_argument("-p","--ploidy",
required=True,
default=None,
dest="ploidyfile",
type=str,
help='''/path/to/the/ploidy.pkl file output by the VarScan pipeline. This is a python
dictionary with key=pool_name, value=dict with key=pop, value=ploidy. The code
will prompt for pool_name if necessary.''')
requiredNAMED.add_argument("-e","--engines",
required=True,
default=None,
dest="engines",
type=int,
help="The number of ipcluster engines that will be launched.")
parser.add_argument("--ipcluster-profile",
required=False,
default='default',
dest="profile",
type=str,
help="The ipcluster profile name with which to start engines. Default: 'default'")
parser.add_argument('--keep-engines',
required=False,
action='store_true',
dest="keep_engines",
help='''Boolean: true if used, false otherwise. If you want to keep
the ipcluster engines alive, use this flag. Otherwise engines will be killed automatically.
(default: False)''')
# check flags
args = parser.parse_args()
if not op.exists(args.outdir):
print(ColorText(f"FAIL: the directory for the output file(s) does not exist.").fail())
print(ColorText(f"FAIL: please create this directory: %s" % args.outdir).fail())
print(ColorText("exiting cmh_test.py").fail())
exit()
# make sure input and ploidyfile exist
nopath = []
for x in [args.input, args.ploidyfile]: # TODO: check for $HOME or other bash vars in path
if not op.exists(x):
nopath.append(x)
# if input or ploidy file do not exist:
if len(nopath) > 0:
print(ColorText("FAIL: The following path(s) do not exist:").fail())
for f in nopath:
print(ColorText("\tFAIL: %s" % f).fail())
print(ColorText('\nexiting cmh_test.py').fail())
exit()
print('args = ', args)
return args
def choose_pool(ploidy:dict) -> dict:
"""Choose which the pool to use as a key to the ploidy dict."""
keys = list(ploidy.keys())
if len(keys) == 1:
# return the value of the dict using the only key
return ploidy[keys[0]]
print(ColorText('\nPlease choose a pool that contains the population of interest.').bold())
nums = []
for i,pool in enumerate(keys):
print('\t%s %s' % (i, pool))
nums.append(i)
while True:
inp = int(input(ColorText("\tINPUT NEEDED: Choose file by number: ").warn()).lower())
if inp in nums:
pool = keys[inp]
break
else:
print(ColorText("\tPlease respond with a number from above.").fail())
# make sure they've chosen at least one pool
while pool is None:
print(ColorText("\tFAIL: You need to specify at least one pool. Revisiting options...").fail())
pool = choose_pool(ploidy)
return ploidy[pool]
def get_ploidy(ploidyfile) -> dict:
"""Get the ploidy of the populations of interest, reduce ploidy pkl."""
print(ColorText('\nLoading ploidy information ...').bold())
# have user choose key to dict
return choose_pool(pklload(ploidyfile))
def read_input(inputfile):
"""Read in inputfile, set index to locus names."""
print(ColorText('\nReading input file ...').bold())
# read in datatable
df = pd.read_table(inputfile, sep='\t')
# set df index
locuscol = 'unstitched_locus' if 'unstitched_locus' in df.columns else 'locus'
if locuscol not in df:
print(ColorText('\nFAIL: There must be a column for locus IDs - either "unstitched_locus" or "locus"').fail())
print(ColorText('FAIL: The column is the hyphen-separated CHROM and POS.').fail())
print(ColorText('exiting cmh_test.py').fail())
exit()
df.index = df[locuscol].tolist()
return df
def main():
# make sure it's not python3.8
check_pyversion()
# parse input arguments
args = get_parse()
# read in datatable
df = read_input(args.input)
# get ploidy for each pool to use to correct read counts for pseudoreplication
# global ploidy # for debugging
ploidy = get_ploidy(args.ploidyfile)
# isolate case/control data
casedata, controldata, pairs = get_data(df, args.case, args.control)
# get ipcluster engines
lview,dview = launch_engines(args.engines, args.profile)
# attach data and functions to engines
attach_data(ploidy=ploidy,
case=args.case,
control=args.control,
pairs=pairs,
cmh_test=cmh_test,
get_freq=get_freq,
get_table=get_table,
create_tables=create_tables,
dview=dview)
# run cmh tests in parallel
output,logs = parallelize_cmh(casedata, controldata, lview)
# write to outfile
outfile = op.join(args.outdir, op.basename(args.input).split(".")[0] + '_CMH-test-results.txt')
print(ColorText(f'\nWriting all results to: ').bold().__str__()+ f'{outfile} ...')
output.to_csv(outfile,
sep='\t', index=False)
# write logs
logfile = outfile.replace(".txt", ".log")
print(ColorText(f'\nWriting logs to: ').bold().__str__()+ f'{logfile} ...')
if len(logs) > 0:
with open(logfile, 'w') as o:
o.write('locus\treason_for_exclusion\todds_ratio\tp-value\tlower_confidence\tupper_confidence\tnum_pops\n')
lines = []
for locus,reason in logs.items():
lines.append(f'{locus}\t{reason}')
o.write("%s" % '\n'.join(lines))
# kill ipcluster to avoid mem problems
if args.keep_engines is False:
print(ColorText("\nStopping ipcluster ...").bold())
subprocess.call([shutil.which('ipcluster'), 'stop'])
print(ColorText('\nDONE!!\n').green().bold())
pass
if __name__ == '__main__':
mytext = ColorText('''
*****************************************************************************
CoAdapTree's
______ __ ___ __ __ ________ _
| ____| | \\ / | | | | | |__ __| ____ _____ __| |__
| | | \\/ | | |__| | | | / __ \\ | ____| |__ __|
| | | |\\ /| | | __ | | | | /__\\_| |___ | |
| |____ | | \\/ | | | | | | | | | \____ ___| | | |
|______| |__| |__| |__| |__| |_| \\____/ |_____| |_|
Cochran-Mantel-Haenszel chi-squared test
*****************************************************************************''').green().bold().__str__()
main()
| wait_for_engines | identifier_name |
cmh_test.py | """
Perform Cochran-Mantel-Haenszel chi-squared tests on stratified contingency tables.
Each stratum is a population's contingency table; each population has a case and a control.
Each contingency table is 2x2 - case and control x REF and ALT allele counts.
ALT and REF allele counts are calculated by multiplying the ploidy of the population by ...
... either the ALT freq or (1-ALT_freq), for each of case and control - unless any of ...
... the counts are np.nan, then skip population.
TODO: allow user to select specific populations (whichpops) for get_ploidy()
"""
import os, sys, argparse, shutil, subprocess, pandas as pd, threading, ipyparallel, time
import pickle
from os import path as op
def check_pyversion() -> None:
"""Make sure python is 3.6 <= version < 3.8."""
pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))
if not pyversion >= 3.6:
text = f'''FAIL: You are using python {pyversion}. This pipeline was built with python 3.7.
FAIL: use 3.6 <= python version < 3.8
FAIL: exiting cmh_test.py'''
print(ColorText(text).fail())
exit()
if not pyversion < 3.8:
print(ColorText("FAIL: python 3.8 has issues with the ipyparallel engine returns.").fail())
print(ColorText("FAIL: use 3.6 <= python version < 3.8").fail())
print(ColorText("FAIL: exiting cmh_test.py").fail())
exit()
def pklload(path:str):
"""Load object from a .pkl file."""
pkl = pickle.load(open(path, 'rb'))
return pkl
def get_client(profile='default') -> tuple:
"""Get lview,dview from ipcluster."""
rc = ipyparallel.Client(profile=profile)
dview = rc[:]
lview = rc.load_balanced_view()
return lview, dview
def attach_data(**kwargs) -> None:
"""Load object to engines."""
import time
num_engines = len(kwargs['dview'])
print(ColorText("\nAdding data to engines ...").bold())
print(ColorText("\tWARN: Watch available mem in another terminal window: 'watch free -h'").warn())
print(ColorText("\tWARN: If available mem gets too low, kill engines and restart cmh_test.py with fewer engines: 'ipcluster stop'").warn())
for key,value in kwargs.items():
if key != 'dview':
print(f'\tLoading {key} ({value.__class__.__name__}) to {num_engines} engines')
kwargs['dview'][key] = value
time.sleep(1)
time.sleep(10)
return None
def watch_async(jobs:list, phase=None) -> None:
"""Wait until jobs are done executing, show progress bar."""
from tqdm import trange
print(ColorText(f"\nWatching {len(jobs)} {phase} jobs ...").bold())
job_idx = list(range(len(jobs)))
for i in trange(len(jobs)):
count = 0
while count < (i+1):
count = len(jobs) - len(job_idx)
for j in job_idx:
if jobs[j].ready():
count += 1
job_idx.remove(j)
pass
class ColorText():
"""
Use ANSI escape sequences to print colors +/- bold/underline to bash terminal.
"""
def __init__(self, text:str):
self.text = text
self.ending = '\033[0m'
self.colors = []
def __str__(self):
return self.text
def bold(self):
self.text = '\033[1m' + self.text + self.ending
return self
def underline(self):
self.text = '\033[4m' + self.text + self.ending
return self
def green(self):
self.text = '\033[92m' + self.text + self.ending
self.colors.append('green')
return self
def purple(self):
self.text = '\033[95m' + self.text + self.ending
self.colors.append('purple')
return self
def blue(self):
self.text = '\033[94m' + self.text + self.ending
self.colors.append('blue')
return self
def warn(self):
self.text = '\033[93m' + self.text + self.ending
self.colors.append('yellow')
return self
def fail(self):
self.text = '\033[91m' + self.text + self.ending
self.colors.append('red')
return self
pass
def askforinput(msg='Do you want to proceed?', tab='', newline='\n'):
"""Ask for input; if msg is default and input is no, exit."""
while True:
inp = input(ColorText(f"{newline}{tab}INPUT NEEDED: {msg} \n{tab}(yes | no): ").warn().__str__()).lower()
if inp in ['yes', 'no']:
if inp == 'no' and msg=='Do you want to proceed?':
print(ColorText('exiting %s' % sys.argv[0]).fail())
exit()
break
else:
print(ColorText("Please respond with 'yes' or 'no'").fail())
return inp
def wait_for_engines(engines:int, profile:str):
"""Reload engines until number matches input engines arg."""
lview = []
dview = []
count = 1
while any([len(lview) != engines, len(dview) != engines]):
if count % 30 == 0:
# if waiting too long..
# TODO: if found engines = 0, no reason to ask, if they continue it will fail
print('count = ', count)
print(ColorText("\tFAIL: Waited too long for engines.").fail())
print(ColorText("\tFAIL: Make sure that if any cluster is running, the -e arg matches the number of engines.").fail())
print(ColorText("\tFAIL: In some cases, not all expected engines can start on a busy server.").fail())
print(ColorText("\tFAIL: Therefore, it may be the case that available engines will be less than requested.").fail())
print(ColorText("\tFAIL: cmh_test.py found %s engines, with -e set to %s" % (len(lview), engines)).fail())
answer = askforinput(msg='Would you like to continue with %s engines? (choosing no will wait another 60 seconds)' % len(lview), tab='\t', newline='')
if answer == 'yes':
break
try:
lview,dview = get_client(profile=profile)
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
lview = []
dview = []
time.sleep(2)
count += 1
print('\tReturning lview,dview (%s engines) ...' % len(lview))
return lview,dview
def launch_engines(engines:int, profile:str):
"""Launch ipcluster with engines under profile."""
print(ColorText(f"\nLaunching ipcluster with {engines} engines...").bold())
def _launch(engines, profile):
subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])
# first see if a cluster has already been started
started = False
try:
print("\tLooking for existing engines ...")
lview,dview = get_client(profile=profile)
if len(lview) != engines:
lview,dview = wait_for_engines(engines, profile)
started = True
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
print("\tNo engines found ...")
# if not, launch 'em
if started is False:
print("\tLaunching engines ...")
# pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid
x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)
x.daemon=True
x.start()
lview,dview = wait_for_engines(engines, profile)
return lview,dview
def get_freq(string:str) -> float:
"""Convert VarScan FREQ to floating decimal [0,1]."""
import numpy
try:
freq = float(string.replace("%", "")) / 100
except AttributeError as e:
# if string is np.nan
freq = numpy.nan
return freq
def get_table(casedata, controldata, locus):
"""Create stratified contingency tables (each 2x2) for a given locus.
Each stratum is a population.
Contingency table has treatment (case or control) as rows, and
allele (REF or ALT) as columns.
Example table
-------------
# in python
[1] mat = np.asarray([[0, 6, 0, 5],
[3, 3, 0, 6],
[6, 0, 2, 4],
[5, 1, 6, 0],
[2, 0, 5, 0]])
[2] [np.reshape(x.tolist(), (2, 2)) for x in mat]
[out]
[array([[0, 6],
[0, 5]]),
array([[3, 3],
[0, 6]]),
array([[6, 0],
[2, 4]]),
array([[5, 1],
[6, 0]]),
array([[2, 0],
[5, 0]])]
# from R - see https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mantelhaen.test
c(0, 0, 6, 5,
...)
Response
Delay Cured Died
None 0 6
1.5h 0 5
...
"""
import numpy, pandas
tables = [] # - a list of lists
for casecol,controlcol in pairs.items():
# get ploidy of pop
pop = casecol.split('.FREQ')[0]
pop_ploidy = ploidy[pop]
# get case-control frequencies of ALT allele
case_freq = get_freq(casedata.loc[locus, casecol])
cntrl_freq = get_freq(controldata.loc[locus, controlcol])
# see if either freq is np.nan, if so, skip this pop
if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:
continue
# collate info for locus (create contingency table data)
t = []
for freq in [cntrl_freq, case_freq]:
t.extend([(1-freq)*pop_ploidy,
freq*pop_ploidy])
tables.append(t)
# return contingency tables (elements of list) for this locus stratified by population (list index)
return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]
def create_tables(*args):
"""Get stratified contingency tables for all loci in cmh_test.py input file."""
import pandas
tables = {}
for locus in args[0].index:
tables[locus] = get_table(*args, locus)
return tables
def cmh_test(*args):
"""Perform Cochran-Mantel-Haenszel chi-squared test on stratified contingency tables."""
import pandas, math
from statsmodels.stats.contingency_tables import StratifiedTable as cmh
# set up data logging
ignored = {}
# get contingency tables for pops with case and control data
tables = create_tables(*args)
# fill in a dataframe with cmh test results, one locus at a time
results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',
'lower_confidence', 'upper_confidence', 'num_pops'])
for locus,table in tables.items():
if len(table) == 0:
# if none of the populations for a locus provide a contingency table (due to missing data)
# ... then continue to the next locus.
ignored[locus] = 'there were no populations that provided contingency tables'
continue
# cmh results for stratified contingency tables (called "table" = an array of tables)
cmh_res = cmh(table)
res = cmh_res.test_null_odds(True) # statistic and p-value
odds_ratio = cmh_res.oddsratio_pooled # odds ratio
conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence
locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)
# look for fixed states across all tables
if sum([math.isnan(x) for x in conf]) > 0:
# if the upper and lower estimate of the confidence interval are NA, ignore
# this can happen when all of the tables returned for a specific locus are fixed
# ... for either the REF or ALT. This happens rarely for loci with low MAF, where
# ... the populations that have variable case or control, do not have a frequency
# ... estimated for the other treatment (case or control) and therefore don't
# ... make it into the list of stratified tables and the remaining tables
# ... (populations) are all fixed for the REF or ALT - again, this happens for
# ... some low MAF loci and may happen if input file has few pops to stratify.
# log reason
ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'
ignored[locus] = ignored[locus] + '\t' + '\t'.join(map(str, locus_results[1:]))
continue
results.loc[len(results.index), :] = locus_results
return results, ignored
def parallelize_cmh(casedata, controldata, lview):
"""Parallelize Cochran-Mantel-Haenszel chi-squared tests by groups of loci."""
print(ColorText('\nParallelizing CMH calls ...').bold())
import math, tqdm, pandas
jobsize = math.ceil(len(casedata.index)/len(lview))
# send jobs to engines
numjobs = (len(casedata.index)/jobsize)+1
print(ColorText("\nSending %d jobs to engines ..." % numjobs ).bold())
jobs = []
loci_to_send = []
count = 0
for locus in tqdm.tqdm(casedata.index):
count += 1
loci_to_send.append(locus)
if len(loci_to_send) == jobsize or count == len(casedata.index):
jobs.append(lview.apply_async(cmh_test, *(casedata.loc[loci_to_send, :],
controldata.loc[loci_to_send, :])))
# jobs.append(cmh_test(casedata.loc[loci_to_send, :],
# controldata.loc[loci_to_send, :])) # for testing
loci_to_send = []
# wait until jobs finish
watch_async(jobs, phase='CMH test')
# gather output, concatenate into one dataframe
print(ColorText('\nGathering parallelized results ...').bold())
logs = dict((locus,reason) for j in jobs for (locus,reason) in j.r[1].items())
output = pandas.concat([j.r[0] for j in jobs])
# output = pandas.concat([j for j in jobs]) # for testing
return output, logs
def get_cc_pairs(casecols, controlcols, case, control):
"""For a given population, pair its case column with its control column."""
badcols = []
# global pairs # for debugging
pairs = {}
for casecol in casecols:
controlcol = casecol.replace(case, control)
if not controlcol in controlcols:
badcols.append((casecol, controlcol))
continue
pairs[casecol] = controlcol
if len(badcols) > 0:
print(ColorText('FAIL: The following case populations do not have a valid control column in the dataframe.').fail())
for cs,ct in badcols:
print(ColorText(f'FAIL: no match for {cs} named {ct} in dataframe').fail())
print(ColorText('FAIL: These case columns have not been paired and will be excluded from analyses.').fail())
askforinput()
return pairs
def get_data(df, case, control):
"""Separate input dataframe into case-only and control-only dataframes."""
# get columns for case and control
casecols = [col for col in df if case in col and 'FREQ' in col]
cntrlcols = [col for col in df if control in col and 'FREQ' in col]
# isolate data to separate dfs
casedata = df[casecols]
controldata = df[cntrlcols]
assert casedata.shape == controldata.shape
# pair up case-control pops
pairs = get_cc_pairs(casecols, cntrlcols, case, control)
return casedata, controldata, pairs
def get_parse():
"""
Parse input flags.
# TODO check arg descriptions, and if they're actually used.
"""
parser = argparse.ArgumentParser(description=print(mytext),
add_help=True,
formatter_class=argparse.RawTextHelpFormatter)
requiredNAMED = parser.add_argument_group('required arguments')
requiredNAMED.add_argument("-i", "--input",
required=True,
default=None,
dest="input",
type=str,
help='''/path/to/VariantsToTable_output.txt
It is assumed that there is either a 'locus' or 'unstitched_locus' column.
The 'locus' column elements are the hyphen-separated
CHROM-POS. If the 'unstitched_chrom' column is present, the code will use the
'unstitched_locus' column for SNP names, otherwise 'CHROM' and 'locus'. The
'unstitched_locus' elements are therefore the hyphen-separated
unstitched_chrom-unstitched_pos. FREQ columns from VarScan are also
assumed.
''')
requiredNAMED.add_argument("-o","--outdir",
required=True,
default=None,
dest="outdir",
type=str,
help='''/path/to/cmh_test_output_dir/
File output from cmh_test.py will be saved in the outdir, with the original
name of the input file, but with the suffix "_CMH-test-results.txt"''')
requiredNAMED.add_argument("--case",
required=True,
default=None,
dest="case",
type=str,
help='''The string present in every column for pools in "case" treatments.''')
requiredNAMED.add_argument("--control",
required=True,
default=None,
dest="control",
type=str,
help='''The string present in every column for pools in "control" treatments.''')
requiredNAMED.add_argument("-p","--ploidy",
required=True,
default=None,
dest="ploidyfile",
type=str,
help='''/path/to/the/ploidy.pkl file output by the VarScan pipeline. This is a python
dictionary with key=pool_name, value=dict with key=pop, value=ploidy. The code
will prompt for pool_name if necessary.''')
requiredNAMED.add_argument("-e","--engines",
required=True,
default=None,
dest="engines",
type=int,
help="The number of ipcluster engines that will be launched.")
parser.add_argument("--ipcluster-profile",
required=False,
default='default',
dest="profile",
type=str,
help="The ipcluster profile name with which to start engines. Default: 'default'")
parser.add_argument('--keep-engines',
required=False,
action='store_true',
dest="keep_engines",
help='''Boolean: true if used, false otherwise. If you want to keep
the ipcluster engines alive, use this flag. Otherwise engines will be killed automatically.
(default: False)''')
# check flags
args = parser.parse_args()
if not op.exists(args.outdir):
print(ColorText(f"FAIL: the directory for the output file(s) does not exist.").fail())
print(ColorText(f"FAIL: please create this directory: %s" % args.outdir).fail())
print(ColorText("exiting cmh_test.py").fail())
exit()
# make sure input and ploidyfile exist
nopath = []
for x in [args.input, args.ploidyfile]: # TODO: check for $HOME or other bash vars in path
if not op.exists(x):
nopath.append(x)
# if input or ploidy file do not exist:
if len(nopath) > 0:
print(ColorText("FAIL: The following path(s) do not exist:").fail())
for f in nopath:
print(ColorText("\tFAIL: %s" % f).fail())
print(ColorText('\nexiting cmh_test.py').fail())
exit()
print('args = ', args)
return args
def choose_pool(ploidy:dict) -> dict:
"""Choose which the pool to use as a key to the ploidy dict."""
keys = list(ploidy.keys())
if len(keys) == 1:
# return the value of the dict using the only key
return ploidy[keys[0]]
print(ColorText('\nPlease choose a pool that contains the population of interest.').bold())
nums = []
for i,pool in enumerate(keys):
print('\t%s %s' % (i, pool))
nums.append(i)
while True:
inp = int(input(ColorText("\tINPUT NEEDED: Choose file by number: ").warn()).lower())
if inp in nums:
pool = keys[inp]
break
else:
print(ColorText("\tPlease respond with a number from above.").fail())
# make sure they've chosen at least one pool
while pool is None:
print(ColorText("\tFAIL: You need to specify at least one pool. Revisiting options...").fail())
pool = choose_pool(ploidy)
return ploidy[pool]
def get_ploidy(ploidyfile) -> dict:
"""Get the ploidy of the populations of interest, reduce ploidy pkl."""
print(ColorText('\nLoading ploidy information ...').bold())
# have user choose key to dict
return choose_pool(pklload(ploidyfile))
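# Illustrative shape of the ploidy.pkl contents expected here, following the
# --ploidy help text (pool and population names are made up):
# {'pool_1': {'pop_A': 40, 'pop_B': 38},
#  'pool_2': {'pop_C': 40}}
# choose_pool() returns the inner dict for the chosen pool, keyed by population name.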
def read_input(inputfile):
"""Read in inputfile, set index to locus names."""
print(ColorText('\nReading input file ...').bold())
# read in datatable
df = pd.read_table(inputfile, sep='\t')
# set df index
locuscol = 'unstitched_locus' if 'unstitched_locus' in df.columns else 'locus'
if locuscol not in df:
print(ColorText('\nFAIL: There must be a column for locus IDs - either "unstitched_locus" or "locus"').fail())
print(ColorText('FAIL: The column is the hyphen-separated CHROM and POS.').fail())
print(ColorText('exiting cmh_test.py').fail())
exit()
df.index = df[locuscol].tolist()
return df
def main():
# make sure it's not python3.8
check_pyversion()
# parse input arguments
args = get_parse()
# read in datatable
df = read_input(args.input)
# get ploidy for each pool to use to correct read counts for pseudoreplication
# global ploidy # for debugging
ploidy = get_ploidy(args.ploidyfile)
# isolate case/control data
casedata, controldata, pairs = get_data(df, args.case, args.control)
# get ipcluster engines
lview,dview = launch_engines(args.engines, args.profile)
# attach data and functions to engines
attach_data(ploidy=ploidy,
case=args.case,
control=args.control,
pairs=pairs,
cmh_test=cmh_test,
get_freq=get_freq,
get_table=get_table,
create_tables=create_tables,
dview=dview)
| # write to outfile
outfile = op.join(args.outdir, op.basename(args.input).split(".")[0] + '_CMH-test-results.txt')
print(ColorText(f'\nWriting all results to: ').bold().__str__()+ f'{outfile} ...')
output.to_csv(outfile,
sep='\t', index=False)
# write logs
logfile = outfile.replace(".txt", ".log")
print(ColorText(f'\nWriting logs to: ').bold().__str__()+ f'{logfile} ...')
if len(logs) > 0:
with open(logfile, 'w') as o:
o.write('locus\treason_for_exclusion\todds_ratio\tp-value\tlower_confidence\tupper_confidence\tnum_pops\n')
lines = []
for locus,reason in logs.items():
lines.append(f'{locus}\t{reason}')
o.write("%s" % '\n'.join(lines))
# kill ipcluster to avoid mem problems
if args.keep_engines is False:
print(ColorText("\nStopping ipcluster ...").bold())
subprocess.call([shutil.which('ipcluster'), 'stop'])
print(ColorText('\nDONE!!\n').green().bold())
pass
if __name__ == '__main__':
mytext = ColorText('''
*****************************************************************************
CoAdapTree's
______ __ ___ __ __ ________ _
| ____| | \\ / | | | | | |__ __| ____ _____ __| |__
| | | \\/ | | |__| | | | / __ \\ | ____| |__ __|
| | | |\\ /| | | __ | | | | /__\\_| |___ | |
| |____ | | \\/ | | | | | | | | | \____ ___| | | |
|______| |__| |__| |__| |__| |_| \\____/ |_____| |_|
Cochran-Mantel-Haenszel chi-squared test
*****************************************************************************''').green().bold().__str__()
main() | # run cmh tests in parallel
output,logs = parallelize_cmh(casedata, controldata, lview)
| random_line_split |
regexp.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The testing package implements a simple regular expression library.
// It is a reduced version of the regular expression package suitable
// for use in tests; it avoids many dependencies.
//
// The syntax of the regular expressions accepted is:
//
// regexp:
// concatenation { '|' concatenation }
// concatenation:
// { closure }
// closure:
// term [ '*' | '+' | '?' ]
// term:
// '^'
// '$'
// '.'
// character
// '[' [ '^' ] character-ranges ']'
// '(' regexp ')'
//
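// Example (illustrative, not from the original file): under this grammar,
// "a(b|c)*d" matches "ad", "abd" and "acbcd"; "[^0-9]+" matches any nonempty
// run of non-digits; and "^go$" matches only the exact text "go".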
package testing
import (
"utf8";
)
var debug = false
// Error codes returned by failures to parse an expression.
var (
ErrInternal = "internal error";
ErrUnmatchedLpar = "unmatched '('";
ErrUnmatchedRpar = "unmatched ')'";
ErrUnmatchedLbkt = "unmatched '['";
ErrUnmatchedRbkt = "unmatched ']'";
ErrBadRange = "bad range in character class";
ErrExtraneousBackslash = "extraneous backslash";
ErrBadClosure = "repeated closure **, ++, etc.";
ErrBareClosure = "closure applies to nothing";
ErrBadBackslash = "illegal backslash escape";
)
// An instruction executed by the NFA
type instr interface {
kind() int; // the type of this instruction: _CHAR, _ANY, etc.
next() instr; // the instruction to execute after this one
setNext(i instr);
index() int;
setIndex(i int);
print();
}
// Fields and methods common to all instructions
type common struct {
_next instr;
_index int;
}
func (c *common) next() instr { return c._next }
func (c *common) setNext(i instr) { c._next = i }
func (c *common) index() int { return c._index }
func (c *common) setIndex(i int) { c._index = i }
// The representation of a compiled regular expression.
// The public interface is entirely through methods.
type Regexp struct {
expr string; // the original expression
inst []instr;
start instr;
nbra int; // number of brackets in expression, for subexpressions
}
const (
_START = // beginning of program
iota;
_END; // end of program: success
_BOT; // '^' beginning of text
_EOT; // '$' end of text
_CHAR; // 'a' regular character
_CHARCLASS; // [a-z] character class
_ANY; // '.' any character including newline
_NOTNL; // [^\n] special case: any character but newline
_BRA; // '(' parenthesized expression
_EBRA; // ')'; end of '(' parenthesized expression
_ALT; // '|' alternation
_NOP; // do nothing; makes it easy to link without patching
)
// --- START start of program
type _Start struct {
common;
}
func (start *_Start) kind() int { return _START }
func (start *_Start) print() { print("start") }
// --- END end of program
type _End struct {
common;
}
func (end *_End) kind() int { return _END }
func (end *_End) print() { print("end") }
// --- BOT beginning of text
type _Bot struct {
common;
}
func (bot *_Bot) kind() int { return _BOT }
func (bot *_Bot) print() { print("bot") }
// --- EOT end of text
type _Eot struct {
common;
}
func (eot *_Eot) kind() int { return _EOT }
func (eot *_Eot) print() { print("eot") }
// --- CHAR a regular character
type _Char struct {
common;
char int;
}
func (char *_Char) kind() int { return _CHAR }
func (char *_Char) print() { print("char ", string(char.char)) }
func newChar(char int) *_Char {
c := new(_Char);
c.char = char;
return c;
}
// --- CHARCLASS [a-z]
type _CharClass struct {
common;
char int;
negate bool; // is character class negated? ([^a-z])
// stored pairwise: [a-z] is (a,z); x is (x,x):
ranges []int;
}
func (cclass *_CharClass) kind() int { return _CHARCLASS }
func (cclass *_CharClass) print() {
print("charclass");
if cclass.negate {
print(" (negated)")
}
for i := 0; i < len(cclass.ranges); i += 2 {
l := cclass.ranges[i];
r := cclass.ranges[i+1];
if l == r {
print(" [", string(l), "]")
} else {
print(" [", string(l), "-", string(r), "]")
}
}
}
func (cclass *_CharClass) addRange(a, b int) {
// range is a through b inclusive
n := len(cclass.ranges);
if n >= cap(cclass.ranges) {
nr := make([]int, n, 2*n);
for i, j := range cclass.ranges {
nr[i] = j
}
cclass.ranges = nr;
}
cclass.ranges = cclass.ranges[0 : n+2];
cclass.ranges[n] = a;
n++;
cclass.ranges[n] = b;
n++;
}
func (cclass *_CharClass) matches(c int) bool {
for i := 0; i < len(cclass.ranges); i = i + 2 {
min := cclass.ranges[i];
max := cclass.ranges[i+1];
if min <= c && c <= max {
return !cclass.negate
}
}
return cclass.negate;
}
func newCharClass() *_CharClass {
c := new(_CharClass);
c.ranges = make([]int, 0, 20);
return c;
}
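// Editorial sketch (not in the original source): how the pairwise range storage
// behaves. The class built here corresponds to [a-cx]; the function name and the
// values are illustrative only.
func exampleCharClassSketch() bool {
cc := newCharClass();
cc.addRange('a', 'c'); // a range is stored as the pair ('a','c')
cc.addRange('x', 'x'); // a single character is stored as ('x','x')
// matches walks the stored pairs, so 'b' falls inside [a-c] while 'q' is in neither.
return cc.matches('b') && !cc.matches('q');
}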
// --- ANY any character
type _Any struct {
common;
}
func (any *_Any) kind() int { return _ANY }
func (any *_Any) print() { print("any") }
// --- NOTNL any character but newline
type _NotNl struct {
common;
}
func (notnl *_NotNl) kind() int { return _NOTNL }
func (notnl *_NotNl) print() { print("notnl") }
// --- BRA parenthesized expression
type _Bra struct {
common;
n int; // subexpression number
}
func (bra *_Bra) kind() int { return _BRA }
func (bra *_Bra) print() { print("bra", bra.n) }
// --- EBRA end of parenthesized expression
type _Ebra struct {
common;
n int; // subexpression number
}
func (ebra *_Ebra) kind() int { return _EBRA }
func (ebra *_Ebra) print() { print("ebra ", ebra.n) }
// --- ALT alternation
type _Alt struct {
common;
left instr; // other branch
}
func (alt *_Alt) kind() int { return _ALT }
func (alt *_Alt) print() { print("alt(", alt.left.index(), ")") }
// --- NOP no operation
type _Nop struct {
common;
}
func (nop *_Nop) kind() int { return _NOP }
func (nop *_Nop) print() { print("nop") }
func (re *Regexp) add(i instr) instr {
n := len(re.inst);
i.setIndex(len(re.inst));
if n >= cap(re.inst) {
ni := make([]instr, n, 2*n);
for i, j := range re.inst {
ni[i] = j
}
re.inst = ni;
}
re.inst = re.inst[0 : n+1];
re.inst[n] = i;
return i;
}
type parser struct {
re *Regexp;
error string;
nlpar int; // number of unclosed lpars
pos int;
ch int;
}
const endOfFile = -1
func (p *parser) c() int { return p.ch }
func (p *parser) nextc() int {
if p.pos >= len(p.re.expr) {
p.ch = endOfFile
} else {
c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:len(p.re.expr)]);
p.ch = c;
p.pos += w;
}
return p.ch;
}
func newParser(re *Regexp) *parser {
p := new(parser);
p.re = re;
p.nextc(); // load p.ch
return p;
}
func special(c int) bool {
s := `\.+*?()|[]^$`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func specialcclass(c int) bool {
s := `\-[]`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func (p *parser) charClass() instr {
cc := newCharClass();
if p.c() == '^' {
cc.negate = true;
p.nextc();
}
left := -1;
for {
switch c := p.c(); c {
case ']', endOfFile:
if left >= 0 {
p.error = ErrBadRange;
return nil;
}
// Is it [^\n]?
if cc.negate && len(cc.ranges) == 2 &&
cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
nl := new(_NotNl);
p.re.add(nl);
return nl;
}
p.re.add(cc);
return cc;
case '-': // do this before backslash processing
p.error = ErrBadRange;
return nil;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return nil;
case c == 'n':
c = '\n'
case specialcclass(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return nil;
}
fallthrough;
default:
p.nextc();
switch {
case left < 0: // first of pair
if p.c() == '-' { // range
p.nextc();
left = c;
} else { // single char
cc.addRange(c, c)
}
case left <= c: // second of pair
cc.addRange(left, c);
left = -1;
default:
p.error = ErrBadRange;
return nil;
}
}
}
return nil;
}
func (p *parser) term() (start, end instr) {
// term() is the leaf of the recursion, so it's sufficient to pick off the
// error state here for early exit.
// The other functions (closure(), concatenation() etc.) assume
// it's safe to recur to here.
if p.error != "" {
return
}
switch c := p.c(); c {
case '|', endOfFile:
return nil, nil
case '*', '+':
p.error = ErrBareClosure;
return;
case ')':
if p.nlpar == 0 {
p.error = ErrUnmatchedRpar;
return;
}
return nil, nil;
case ']':
p.error = ErrUnmatchedRbkt;
return;
case '^':
p.nextc();
start = p.re.add(new(_Bot));
return start, start;
case '$':
p.nextc();
start = p.re.add(new(_Eot));
return start, start;
case '.':
p.nextc();
start = p.re.add(new(_Any));
return start, start;
case '[':
p.nextc();
start = p.charClass();
if p.error != "" {
return
}
if p.c() != ']' {
p.error = ErrUnmatchedLbkt;
return;
}
p.nextc();
return start, start;
case '(':
p.nextc();
p.nlpar++;
p.re.nbra++; // increment first so first subexpr is \1
nbra := p.re.nbra;
start, end = p.regexp();
if p.c() != ')' {
p.error = ErrUnmatchedLpar;
return;
}
p.nlpar--;
p.nextc();
bra := new(_Bra);
p.re.add(bra);
ebra := new(_Ebra);
p.re.add(ebra);
bra.n = nbra;
ebra.n = nbra;
if start == nil {
if end == nil {
p.error = ErrInternal;
return;
}
start = ebra;
} else {
end.setNext(ebra)
}
bra.setNext(start);
return bra, ebra;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return;
case c == 'n':
c = '\n'
case special(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return;
}
fallthrough;
default:
p.nextc();
start = newChar(c);
p.re.add(start);
return start, start;
}
panic("unreachable");
}
func (p *parser) closure() (start, end instr) { | return
}
switch p.c() {
case '*':
// (start,end)*:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
start = alt; // alt becomes new (start, end)
end = alt;
case '+':
// (start,end)+:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
end = alt; // start is unchanged; end is alt
case '?':
// (start,end)?:
alt := new(_Alt);
p.re.add(alt);
nop := new(_Nop);
p.re.add(nop);
alt.left = start; // alternate branch is start
alt.setNext(nop); // follow on to nop
end.setNext(nop); // after end, go to nop
start = alt; // start is now alt
end = nop; // end is nop pointed to by both branches
default:
return
}
switch p.nextc() {
case '*', '+', '?':
p.error = ErrBadClosure
}
return;
}
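// Editorial sketch (not in the original source): a quick probe of how closure()
// lays out instructions for "a*". After nop elimination the entry reached from
// _START is the _ALT that either loops back through the _CHAR or falls through
// toward _END; for "a+" only end is moved, forcing at least one _CHAR pass.
func exampleStarLayout() bool {
re, err := CompileRegexp("a*");
if err != "" {
return false
}
return re.start.next().kind() == _ALT;
}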
func (p *parser) concatenation() (start, end instr) {
for {
nstart, nend := p.closure();
if p.error != "" {
return
}
switch {
case nstart == nil: // end of this concatenation
if start == nil { // this is the empty string
nop := p.re.add(new(_Nop));
return nop, nop;
}
return;
case start == nil: // this is first element of concatenation
start, end = nstart, nend
default:
end.setNext(nstart);
end = nend;
}
}
panic("unreachable");
}
func (p *parser) regexp() (start, end instr) {
start, end = p.concatenation();
if p.error != "" {
return
}
for {
switch p.c() {
default:
return
case '|':
p.nextc();
nstart, nend := p.concatenation();
if p.error != "" {
return
}
alt := new(_Alt);
p.re.add(alt);
alt.left = start;
alt.setNext(nstart);
nop := new(_Nop);
p.re.add(nop);
end.setNext(nop);
nend.setNext(nop);
start, end = alt, nop;
}
}
panic("unreachable");
}
func unNop(i instr) instr {
for i.kind() == _NOP {
i = i.next()
}
return i;
}
func (re *Regexp) eliminateNops() {
for i := 0; i < len(re.inst); i++ {
inst := re.inst[i];
if inst.kind() == _END {
continue
}
inst.setNext(unNop(inst.next()));
if inst.kind() == _ALT {
alt := inst.(*_Alt);
alt.left = unNop(alt.left);
}
}
}
func (re *Regexp) doParse() string {
p := newParser(re);
start := new(_Start);
re.add(start);
s, e := p.regexp();
if p.error != "" {
return p.error
}
start.setNext(s);
re.start = start;
e.setNext(re.add(new(_End)));
re.eliminateNops();
return p.error;
}
// CompileRegexp parses a regular expression and returns, if successful, a Regexp
// object that can be used to match against text.
func CompileRegexp(str string) (regexp *Regexp, error string) {
regexp = new(Regexp);
regexp.expr = str;
regexp.inst = make([]instr, 0, 20);
error = regexp.doParse();
return;
}
// MustCompile is like CompileRegexp but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
regexp, error := CompileRegexp(str);
if error != "" {
panicln(`regexp: compiling "`, str, `": `, error)
}
return regexp;
}
type state struct {
inst instr; // next instruction to execute
match []int; // pairs of bracketing submatches. 0th is start,end
}
// Append new state to to-do list. Leftmost-longest wins so avoid
// adding a state that's already active.
func addState(s []state, inst instr, match []int) []state {
index := inst.index();
l := len(s);
pos := match[0];
// TODO: Once the state is a vector and we can do insert, have inputs always
// go in order correctly and this "earlier" test is never necessary.
for i := 0; i < l; i++ {
if s[i].inst.index() == index && // same instruction
s[i].match[0] < pos { // earlier match already going; leftmost wins
return s
}
}
if l == cap(s) {
s1 := make([]state, 2*l)[0:l];
for i := 0; i < l; i++ {
s1[i] = s[i]
}
s = s1;
}
s = s[0 : l+1];
s[l].inst = inst;
s[l].match = match;
return s;
}
// Accepts either string or bytes - the logic is identical either way.
// If bytes == nil, scan str.
func (re *Regexp) doExecute(str string, bytes []byte, pos int) []int {
var s [2][]state; // TODO: use a vector when state values (not ptrs) can be vector elements
s[0] = make([]state, 10)[0:0];
s[1] = make([]state, 10)[0:0];
in, out := 0, 1;
var final state;
found := false;
end := len(str);
if bytes != nil {
end = len(bytes)
}
for pos <= end {
if !found {
// prime the pump if we haven't seen a match yet
match := make([]int, 2*(re.nbra+1));
for i := 0; i < len(match); i++ {
match[i] = -1 // no match seen; catches cases like "a(b)?c" on "ac"
}
match[0] = pos;
s[out] = addState(s[out], re.start.next(), match);
}
in, out = out, in; // old out state is new in state
s[out] = s[out][0:0]; // clear out state
if len(s[in]) == 0 {
// machine has completed
break
}
charwidth := 1;
c := endOfFile;
if pos < end {
if bytes == nil {
c, charwidth = utf8.DecodeRuneInString(str[pos:end])
} else {
c, charwidth = utf8.DecodeRune(bytes[pos:end])
}
}
for i := 0; i < len(s[in]); i++ {
st := s[in][i];
switch s[in][i].inst.kind() {
case _BOT:
if pos == 0 {
s[in] = addState(s[in], st.inst.next(), st.match)
}
case _EOT:
if pos == end {
s[in] = addState(s[in], st.inst.next(), st.match)
}
case _CHAR:
if c == st.inst.(*_Char).char {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _CHARCLASS:
if st.inst.(*_CharClass).matches(c) {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _ANY:
if c != endOfFile {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _NOTNL:
if c != endOfFile && c != '\n' {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _BRA:
n := st.inst.(*_Bra).n;
st.match[2*n] = pos;
s[in] = addState(s[in], st.inst.next(), st.match);
case _EBRA:
n := st.inst.(*_Ebra).n;
st.match[2*n+1] = pos;
s[in] = addState(s[in], st.inst.next(), st.match);
case _ALT:
s[in] = addState(s[in], st.inst.(*_Alt).left, st.match);
// give other branch a copy of this match vector
s1 := make([]int, 2*(re.nbra+1));
for i := 0; i < len(s1); i++ {
s1[i] = st.match[i]
}
s[in] = addState(s[in], st.inst.next(), s1);
case _END:
// choose leftmost longest
if !found || // first
st.match[0] < final.match[0] || // leftmost
(st.match[0] == final.match[0] && pos > final.match[1]) { // longest
final = st;
final.match[1] = pos;
}
found = true;
default:
st.inst.print();
panic("unknown instruction in execute");
}
}
pos += charwidth;
}
return final.match;
}
// ExecuteString matches the Regexp against the string s.
// The return value is an array of integers, in pairs, identifying the positions of
// substrings matched by the expression.
// s[a[0]:a[1]] is the substring matched by the entire expression.
// s[a[2*i]:a[2*i+1]] for i > 0 is the substring matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the string.
// An empty array means "no match".
func (re *Regexp) ExecuteString(s string) (a []int) {
return re.doExecute(s, nil, 0)
}
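// Editorial sketch (not in the original source): interpreting the index pairs
// returned by ExecuteString. The pattern, input, and function name below are
// arbitrary examples.
func exampleExecuteIndices() bool {
re, err := CompileRegexp("a(b)?c");
if err != "" {
return false
}
m := re.ExecuteString("xacy");
// m[0]:m[1] bracket the whole match "ac" (1 and 3 here); m[2] and m[3] stay -1
// because the parenthesized (b) matched nothing.
return len(m) == 4 && m[2] == -1;
}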
// Execute matches the Regexp against the byte slice b.
// The return value is an array of integers, in pairs, identifying the positions of
// subslices matched by the expression.
// b[a[0]:a[1]] is the subslice matched by the entire expression.
// b[a[2*i]:a[2*i+1]] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the slice.
// An empty array means "no match".
func (re *Regexp) Execute(b []byte) (a []int) { return re.doExecute("", b, 0) }
// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool { return len(re.doExecute(s, nil, 0)) > 0 }
// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool { return len(re.doExecute("", b, 0)) > 0 }
// MatchStrings matches the Regexp against the string s.
// The return value is an array of strings matched by the expression.
// a[0] is the substring matched by the entire expression.
// a[i] for i > 0 is the substring matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchStrings(s string) (a []string) {
r := re.doExecute(s, nil, 0);
if r == nil {
return nil
}
a = make([]string, len(r)/2);
for i := 0; i < len(r); i += 2 {
if r[i] != -1 { // -1 means no match for this subexpression
a[i/2] = s[r[i]:r[i+1]]
}
}
return;
}
// MatchSlices matches the Regexp against the byte slice b.
// The return value is an array of subslices matched by the expression.
// a[0] is the subslice matched by the entire expression.
// a[i] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchSlices(b []byte) (a [][]byte) {
r := re.doExecute("", b, 0);
if r == nil {
return nil
}
a = make([][]byte, len(r)/2);
for i := 0; i < len(r); i += 2 {
if r[i] != -1 { // -1 means no match for this subexpression
a[i/2] = b[r[i]:r[i+1]]
}
}
return;
}
// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, error string) {
re, err := CompileRegexp(pattern);
if err != "" {
return false, err
}
return re.MatchString(s), "";
}
// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, error string) {
re, err := CompileRegexp(pattern);
if err != "" {
return false, err
}
return re.Match(b), "";
} | start, end = p.term();
if start == nil || p.error != "" { | random_line_split |
regexp.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The testing package implements a simple regular expression library.
// It is a reduced version of the regular expression package suitable
// for use in tests; it avoids many dependencies.
//
// The syntax of the regular expressions accepted is:
//
// regexp:
// concatenation { '|' concatenation }
// concatenation:
// { closure }
// closure:
// term [ '*' | '+' | '?' ]
// term:
// '^'
// '$'
// '.'
// character
// '[' [ '^' ] character-ranges ']'
// '(' regexp ')'
//
package testing
import (
"utf8";
)
var debug = false
// Error codes returned by failures to parse an expression.
var (
ErrInternal = "internal error";
ErrUnmatchedLpar = "unmatched '('";
ErrUnmatchedRpar = "unmatched ')'";
ErrUnmatchedLbkt = "unmatched '['";
ErrUnmatchedRbkt = "unmatched ']'";
ErrBadRange = "bad range in character class";
ErrExtraneousBackslash = "extraneous backslash";
ErrBadClosure = "repeated closure **, ++, etc.";
ErrBareClosure = "closure applies to nothing";
ErrBadBackslash = "illegal backslash escape";
)
// An instruction executed by the NFA
type instr interface {
kind() int; // the type of this instruction: _CHAR, _ANY, etc.
next() instr; // the instruction to execute after this one
setNext(i instr);
index() int;
setIndex(i int);
print();
}
// Fields and methods common to all instructions
type common struct {
_next instr;
_index int;
}
func (c *common) next() instr { return c._next }
func (c *common) setNext(i instr) { c._next = i }
func (c *common) index() int { return c._index }
func (c *common) setIndex(i int) { c._index = i }
// The representation of a compiled regular expression.
// The public interface is entirely through methods.
type Regexp struct {
expr string; // the original expression
inst []instr;
start instr;
nbra int; // number of brackets in expression, for subexpressions
}
const (
_START = // beginning of program
iota;
_END; // end of program: success
_BOT; // '^' beginning of text
_EOT; // '$' end of text
_CHAR; // 'a' regular character
_CHARCLASS; // [a-z] character class
_ANY; // '.' any character including newline
_NOTNL; // [^\n] special case: any character but newline
_BRA; // '(' parenthesized expression
_EBRA; // ')'; end of '(' parenthesized expression
_ALT; // '|' alternation
_NOP; // do nothing; makes it easy to link without patching
)
// --- START start of program
type _Start struct {
common;
}
func (start *_Start) kind() int { return _START }
func (start *_Start) print() { print("start") }
// --- END end of program
type _End struct {
common;
}
func (end *_End) kind() int { return _END }
func (end *_End) print() { print("end") }
// --- BOT beginning of text
type _Bot struct {
common;
}
func (bot *_Bot) kind() int { return _BOT }
func (bot *_Bot) print() { print("bot") }
// --- EOT end of text
type _Eot struct {
common;
}
func (eot *_Eot) kind() int { return _EOT }
func (eot *_Eot) print() { print("eot") }
// --- CHAR a regular character
type _Char struct {
common;
char int;
}
func (char *_Char) kind() int { return _CHAR }
func (char *_Char) print() { print("char ", string(char.char)) }
func newChar(char int) *_Char {
c := new(_Char);
c.char = char;
return c;
}
// --- CHARCLASS [a-z]
type _CharClass struct {
common;
char int;
negate bool; // is character class negated? ([^a-z])
// stored pairwise: [a-z] is (a,z); x is (x,x):
ranges []int;
}
func (cclass *_CharClass) kind() int { return _CHARCLASS }
func (cclass *_CharClass) print() {
print("charclass");
if cclass.negate {
print(" (negated)")
}
for i := 0; i < len(cclass.ranges); i += 2 {
l := cclass.ranges[i];
r := cclass.ranges[i+1];
if l == r {
print(" [", string(l), "]")
} else {
print(" [", string(l), "-", string(r), "]")
}
}
}
func (cclass *_CharClass) addRange(a, b int) {
// range is a through b inclusive
n := len(cclass.ranges);
if n >= cap(cclass.ranges) {
nr := make([]int, n, 2*n);
for i, j := range cclass.ranges {
nr[i] = j
}
cclass.ranges = nr;
}
cclass.ranges = cclass.ranges[0 : n+2];
cclass.ranges[n] = a;
n++;
cclass.ranges[n] = b;
n++;
}
func (cclass *_CharClass) matches(c int) bool {
for i := 0; i < len(cclass.ranges); i = i + 2 {
min := cclass.ranges[i];
max := cclass.ranges[i+1];
if min <= c && c <= max {
return !cclass.negate
}
}
return cclass.negate;
}
func newCharClass() *_CharClass {
c := new(_CharClass);
c.ranges = make([]int, 0, 20);
return c;
}
// --- ANY any character
type _Any struct {
common;
}
func (any *_Any) kind() int { return _ANY }
func (any *_Any) print() { print("any") }
// --- NOTNL any character but newline
type _NotNl struct {
common;
}
func (notnl *_NotNl) kind() int { return _NOTNL }
func (notnl *_NotNl) print() { print("notnl") }
// --- BRA parenthesized expression
type _Bra struct {
common;
n int; // subexpression number
}
func (bra *_Bra) kind() int { return _BRA }
func (bra *_Bra) print() { print("bra", bra.n) }
// --- EBRA end of parenthesized expression
type _Ebra struct {
common;
n int; // subexpression number
}
func (ebra *_Ebra) kind() int { return _EBRA }
func (ebra *_Ebra) print() { print("ebra ", ebra.n) }
// --- ALT alternation
type _Alt struct {
common;
left instr; // other branch
}
func (alt *_Alt) kind() int { return _ALT }
func (alt *_Alt) print() { print("alt(", alt.left.index(), ")") }
// --- NOP no operation
type _Nop struct {
common;
}
func (nop *_Nop) kind() int { return _NOP }
func (nop *_Nop) print() { print("nop") }
func (re *Regexp) add(i instr) instr {
n := len(re.inst);
i.setIndex(len(re.inst));
if n >= cap(re.inst) {
ni := make([]instr, n, 2*n);
for i, j := range re.inst {
ni[i] = j
}
re.inst = ni;
}
re.inst = re.inst[0 : n+1];
re.inst[n] = i;
return i;
}
type parser struct {
re *Regexp;
error string;
nlpar int; // number of unclosed lpars
pos int;
ch int;
}
const endOfFile = -1
func (p *parser) c() int { return p.ch }
func (p *parser) nextc() int {
if p.pos >= len(p.re.expr) {
p.ch = endOfFile
} else {
c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:len(p.re.expr)]);
p.ch = c;
p.pos += w;
}
return p.ch;
}
func newParser(re *Regexp) *parser {
p := new(parser);
p.re = re;
p.nextc(); // load p.ch
return p;
}
func special(c int) bool {
s := `\.+*?()|[]^$`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func specialcclass(c int) bool {
s := `\-[]`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func (p *parser) charClass() instr {
cc := newCharClass();
if p.c() == '^' {
cc.negate = true;
p.nextc();
}
left := -1;
for {
switch c := p.c(); c {
case ']', endOfFile:
if left >= 0 {
p.error = ErrBadRange;
return nil;
}
// Is it [^\n]?
if cc.negate && len(cc.ranges) == 2 &&
cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
nl := new(_NotNl);
p.re.add(nl);
return nl;
}
p.re.add(cc);
return cc;
case '-': // do this before backslash processing
p.error = ErrBadRange;
return nil;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return nil;
case c == 'n':
c = '\n'
case specialcclass(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return nil;
}
fallthrough;
default:
p.nextc();
switch {
case left < 0: // first of pair
if p.c() == '-' { // range
p.nextc();
left = c;
} else { // single char
cc.addRange(c, c)
}
case left <= c: // second of pair
cc.addRange(left, c);
left = -1;
default:
p.error = ErrBadRange;
return nil;
}
}
}
return nil;
}
func (p *parser) term() (start, end instr) {
// term() is the leaf of the recursion, so it's sufficient to pick off the
// error state here for early exit.
// The other functions (closure(), concatenation() etc.) assume
// it's safe to recur to here.
if p.error != "" {
return
}
switch c := p.c(); c {
case '|', endOfFile:
return nil, nil
case '*', '+':
p.error = ErrBareClosure;
return;
case ')':
if p.nlpar == 0 {
p.error = ErrUnmatchedRpar;
return;
}
return nil, nil;
case ']':
p.error = ErrUnmatchedRbkt;
return;
case '^':
p.nextc();
start = p.re.add(new(_Bot));
return start, start;
case '$':
p.nextc();
start = p.re.add(new(_Eot));
return start, start;
case '.':
p.nextc();
start = p.re.add(new(_Any));
return start, start;
case '[':
p.nextc();
start = p.charClass();
if p.error != "" {
return
}
if p.c() != ']' {
p.error = ErrUnmatchedLbkt;
return;
}
p.nextc();
return start, start;
case '(':
p.nextc();
p.nlpar++;
p.re.nbra++; // increment first so first subexpr is \1
nbra := p.re.nbra;
start, end = p.regexp();
if p.c() != ')' {
p.error = ErrUnmatchedLpar;
return;
}
p.nlpar--;
p.nextc();
bra := new(_Bra);
p.re.add(bra);
ebra := new(_Ebra);
p.re.add(ebra);
bra.n = nbra;
ebra.n = nbra;
if start == nil {
if end == nil {
p.error = ErrInternal;
return;
}
start = ebra;
} else {
end.setNext(ebra)
}
bra.setNext(start);
return bra, ebra;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return;
case c == 'n':
c = '\n'
case special(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return;
}
fallthrough;
default:
p.nextc();
start = newChar(c);
p.re.add(start);
return start, start;
}
panic("unreachable");
}
func (p *parser) closure() (start, end instr) {
start, end = p.term();
if start == nil || p.error != "" {
return
}
switch p.c() {
case '*':
// (start,end)*:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
start = alt; // alt becomes new (start, end)
end = alt;
case '+':
// (start,end)+:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
end = alt; // start is unchanged; end is alt
case '?':
// (start,end)?:
alt := new(_Alt);
p.re.add(alt);
nop := new(_Nop);
p.re.add(nop);
alt.left = start; // alternate branch is start
alt.setNext(nop); // follow on to nop
end.setNext(nop); // after end, go to nop
start = alt; // start is now alt
end = nop; // end is nop pointed to by both branches
default:
return
}
switch p.nextc() {
case '*', '+', '?':
p.error = ErrBadClosure
}
return;
}
func (p *parser) concatenation() (start, end instr) {
for {
nstart, nend := p.closure();
if p.error != "" {
return
}
switch {
case nstart == nil: // end of this concatenation
if start == nil { // this is the empty string
nop := p.re.add(new(_Nop));
return nop, nop;
}
return;
case start == nil: // this is first element of concatenation
start, end = nstart, nend
default:
end.setNext(nstart);
end = nend;
}
}
panic("unreachable");
}
func (p *parser) regexp() (start, end instr) {
start, end = p.concatenation();
if p.error != "" {
return
}
for {
switch p.c() {
default:
return
case '|':
p.nextc();
nstart, nend := p.concatenation();
if p.error != "" {
return
}
alt := new(_Alt);
p.re.add(alt);
alt.left = start;
alt.setNext(nstart);
nop := new(_Nop);
p.re.add(nop);
end.setNext(nop);
nend.setNext(nop);
start, end = alt, nop;
}
}
panic("unreachable");
}
func unNop(i instr) instr {
for i.kind() == _NOP {
i = i.next()
}
return i;
}
func (re *Regexp) eliminateNops() {
for i := 0; i < len(re.inst); i++ {
inst := re.inst[i];
if inst.kind() == _END {
continue
}
inst.setNext(unNop(inst.next()));
if inst.kind() == _ALT {
alt := inst.(*_Alt);
alt.left = unNop(alt.left);
}
}
}
func (re *Regexp) doParse() string {
p := newParser(re);
start := new(_Start);
re.add(start);
s, e := p.regexp();
if p.error != "" {
return p.error
}
start.setNext(s);
re.start = start;
e.setNext(re.add(new(_End)));
re.eliminateNops();
return p.error;
}
// CompileRegexp parses a regular expression and returns, if successful, a Regexp
// object that can be used to match against text.
func CompileRegexp(str string) (regexp *Regexp, error string) {
regexp = new(Regexp);
regexp.expr = str;
regexp.inst = make([]instr, 0, 20);
error = regexp.doParse();
return;
}
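// Editorial sketch (not in the original source): errors are plain strings here, so
// callers compare against "" rather than nil. The pattern below is deliberately
// malformed to trigger ErrBareClosure; the function name is illustrative.
func exampleCompileError() bool {
_, err := CompileRegexp("*a");
// "*a" begins with a closure that applies to nothing, so err equals ErrBareClosure.
return err == ErrBareClosure;
}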
// MustCompile is like CompileRegexp but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
regexp, error := CompileRegexp(str);
if error != "" {
panicln(`regexp: compiling "`, str, `": `, error)
}
return regexp;
}
type state struct {
inst instr; // next instruction to execute
match []int; // pairs of bracketing submatches. 0th is start,end
}
// Append new state to to-do list. Leftmost-longest wins so avoid
// adding a state that's already active.
func addState(s []state, inst instr, match []int) []state {
index := inst.index();
l := len(s);
pos := match[0];
// TODO: Once the state is a vector and we can do insert, have inputs always
// go in order correctly and this "earlier" test is never necessary.
for i := 0; i < l; i++ {
if s[i].inst.index() == index && // same instruction
s[i].match[0] < pos { // earlier match already going; leftmost wins
return s
}
}
if l == cap(s) {
s1 := make([]state, 2*l)[0:l];
for i := 0; i < l; i++ {
s1[i] = s[i]
}
s = s1;
}
s = s[0 : l+1];
s[l].inst = inst;
s[l].match = match;
return s;
}
// Accepts either string or bytes - the logic is identical either way.
// If bytes == nil, scan str.
func (re *Regexp) doExecute(str string, bytes []byte, pos int) []int {
var s [2][]state; // TODO: use a vector when state values (not ptrs) can be vector elements
s[0] = make([]state, 10)[0:0];
s[1] = make([]state, 10)[0:0];
in, out := 0, 1;
var final state;
found := false;
end := len(str);
if bytes != nil {
end = len(bytes)
}
for pos <= end {
if !found {
// prime the pump if we haven't seen a match yet
match := make([]int, 2*(re.nbra+1));
for i := 0; i < len(match); i++ {
match[i] = -1 // no match seen; catches cases like "a(b)?c" on "ac"
}
match[0] = pos;
s[out] = addState(s[out], re.start.next(), match);
}
in, out = out, in; // old out state is new in state
s[out] = s[out][0:0]; // clear out state
if len(s[in]) == 0 {
// machine has completed
break
}
charwidth := 1;
c := endOfFile;
if pos < end {
if bytes == nil {
c, charwidth = utf8.DecodeRuneInString(str[pos:end])
} else {
c, charwidth = utf8.DecodeRune(bytes[pos:end])
}
}
for i := 0; i < len(s[in]); i++ {
st := s[in][i];
switch s[in][i].inst.kind() {
case _BOT:
if pos == 0 {
s[in] = addState(s[in], st.inst.next(), st.match)
}
case _EOT:
if pos == end {
s[in] = addState(s[in], st.inst.next(), st.match)
}
case _CHAR:
if c == st.inst.(*_Char).char {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _CHARCLASS:
if st.inst.(*_CharClass).matches(c) {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _ANY:
if c != endOfFile {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _NOTNL:
if c != endOfFile && c != '\n' {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _BRA:
n := st.inst.(*_Bra).n;
st.match[2*n] = pos;
s[in] = addState(s[in], st.inst.next(), st.match);
case _EBRA:
n := st.inst.(*_Ebra).n;
st.match[2*n+1] = pos;
s[in] = addState(s[in], st.inst.next(), st.match);
case _ALT:
s[in] = addState(s[in], st.inst.(*_Alt).left, st.match);
// give other branch a copy of this match vector
s1 := make([]int, 2*(re.nbra+1));
for i := 0; i < len(s1); i++ {
s1[i] = st.match[i]
}
s[in] = addState(s[in], st.inst.next(), s1);
case _END:
// choose leftmost longest
if !found || // first
st.match[0] < final.match[0] || // leftmost
(st.match[0] == final.match[0] && pos > final.match[1]) { // longest
final = st;
final.match[1] = pos;
}
found = true;
default:
st.inst.print();
panic("unknown instruction in execute");
}
}
pos += charwidth;
}
return final.match;
}
// ExecuteString matches the Regexp against the string s.
// The return value is an array of integers, in pairs, identifying the positions of
// substrings matched by the expression.
// s[a[0]:a[1]] is the substring matched by the entire expression.
// s[a[2*i]:a[2*i+1]] for i > 0 is the substring matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the string.
// An empty array means "no match".
func (re *Regexp) ExecuteString(s string) (a []int) {
return re.doExecute(s, nil, 0)
}
// Execute matches the Regexp against the byte slice b.
// The return value is an array of integers, in pairs, identifying the positions of
// subslices matched by the expression.
// b[a[0]:a[1]] is the subslice matched by the entire expression.
// b[a[2*i]:a[2*i+1]] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the slice.
// An empty array means "no match".
func (re *Regexp) Execute(b []byte) (a []int) { return re.doExecute("", b, 0) }
// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool { return len(re.doExecute(s, nil, 0)) > 0 }
// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool { return len(re.doExecute("", b, 0)) > 0 }
// MatchStrings matches the Regexp against the string s.
// The return value is an array of strings matched by the expression.
// a[0] is the substring matched by the entire expression.
// a[i] for i > 0 is the substring matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchStrings(s string) (a []string) {
r := re.doExecute(s, nil, 0);
if r == nil {
return nil
}
a = make([]string, len(r)/2);
for i := 0; i < len(r); i += 2 |
return;
}
// MatchSlices matches the Regexp against the byte slice b.
// The return value is an array of subslices matched by the expression.
// a[0] is the subslice matched by the entire expression.
// a[i] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchSlices(b []byte) (a [][]byte) {
r := re.doExecute("", b, 0);
if r == nil {
return nil
}
a = make([][]byte, len(r)/2);
for i := 0; i < len(r); i += 2 {
if r[i] != -1 { // -1 means no match for this subexpression
a[i/2] = b[r[i]:r[i+1]]
}
}
return;
}
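// Editorial sketch (not in the original source): the package-level helpers below
// recompile their pattern on every call, so code matching many inputs would usually
// keep the *Regexp from CompileRegexp or MustCompile instead. The pattern, inputs,
// and function name are illustrative only.
func exampleReuseCompiled() bool {
ok, err := MatchString("h.llo", "hello");
if err != "" || !ok {
return false
}
re := MustCompile("h.llo");
return re.MatchString("hallo");
}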
// MatchString checks whether a textual regular expression
// matches a string. More complicated queries need
// to use Compile and the full Regexp interface.
func MatchString(pattern string, s string) (matched bool, error string) {
re, err := CompileRegexp(pattern);
if err != "" {
return false, err
}
return re.MatchString(s), "";
}
// Match checks whether a textual regular expression
// matches a byte slice. More complicated queries need
// to use Compile and the full Regexp interface.
func Match(pattern string, b []byte) (matched bool, error string) {
re, err := CompileRegexp(pattern);
if err != "" {
return false, err
}
return re.Match(b), "";
}
| {
if r[i] != -1 { // -1 means no match for this subexpression
a[i/2] = s[r[i]:r[i+1]]
}
} | conditional_block |