file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---
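Each row below is one fill-in-the-middle (FIM) example: `prefix`, `middle`, and `suffix` are contiguous slices of the source file named in `file_name` (so `prefix + middle + suffix` reconstructs the file), and `fim_type` labels how the middle span was chosen; the four classes appearing in this preview are `conditional_block`, `random_line_split`, `identifier_body`, and `identifier_name`. A minimal loading sketch is below; it assumes the Hugging Face `datasets` library, and the dataset identifier and FIM sentinel tokens in it are placeholders rather than part of this dataset:

```python
from datasets import load_dataset

# Hypothetical identifier; substitute wherever this dataset is actually hosted.
ds = load_dataset("user/fim-code-dataset", split="train")

row = ds[0]

# The three text columns are contiguous slices of one source file,
# so plain concatenation reconstructs it verbatim.
original = row["prefix"] + row["middle"] + row["suffix"]

# For FIM training the model sees prefix and suffix and must generate the
# middle. The sentinel tokens are illustrative and model-specific.
prompt = f"<fim_prefix>{row['prefix']}<fim_suffix>{row['suffix']}<fim_middle>"
target = row["middle"]

print(row["file_name"], row["fim_type"], len(original))
```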
flocking-node.js | fs = require("fs"),
url = require("url"),
fluid = fluid || require("infusion"),
flock = fluid.registerNamespace("flock"),
Speaker = require("speaker"),
Readable = require("stream").Readable,
midi = require("midi");
(function () {
"use strict";
/*********************************************************
* Override default clocks with same-thread alternatives *
*********************************************************/
fluid.demands("flock.scheduler.webWorkerIntervalClock", ["flock.platform.nodejs", "flock.scheduler.async"], {
funcName: "flock.scheduler.intervalClock"
});
fluid.demands("flock.scheduler.webWorkerScheduleClock", ["flock.platform.nodejs", "flock.scheduler.async"], {
funcName: "flock.scheduler.scheduleClock"
});
/********************************************
* Override buffer loading implementations. *
********************************************/
fluid.registerNamespace("flock.file");
flock.file.readFromPath = function (options) {
var path = options.src;
fs.exists(path, function (exists) {
if (!exists && options.error) {
options.error(path + " doesn't exist.");
return;
}
fs.stat(path, function (error, stats) {
fs.open(path, "r", function (error, fd) {
var buf = new Buffer(stats.size);
fs.read(fd, buf, 0, buf.length, null, function () {
var type = flock.file.parseFileExtension(path);
var arr = new Int8Array(buf);
options.success(arr.buffer, type);
});
});
});
});
};
fluid.registerNamespace("flock.net");
flock.net.readBufferFromUrl = function () {
throw new Error("Loading files from URLs is not currently supported in Node.js.");
};
fluid.registerNamespace("flock.audio.loadBuffer");
flock.audio.loadBuffer.readerForSource = function (src) {
if (typeof (src) !== "string") {
throw new Error("Flocking error: Can't load a buffer from an unknown type of source. " +
"Only paths and URLs are currently supported on Node.js.");
}
var parsed = url.parse(src);
return parsed.protocol === "data:" ? flock.file.readBufferFromDataUrl :
!parsed.protocol ? flock.file.readFromPath : flock.net.readBufferFromUrl;
};
fluid.registerNamespace("flock.audio.decode");
// TODO: Use a stream-style interface for decoding rather than just dumping the whole job on nextTick().
flock.audio.decode.node = function (options) {
process.nextTick(function () {
flock.audio.decode.sync(options);
});
};
flock.audio.registerDecoderStrategy({
"default": flock.audio.decode.node,
"aiff": flock.audio.decode.node
});
/*********************************************
* Node.js-based Environment implementation. *
*********************************************/
fluid.defaults("flock.audioStrategy.nodejs", {
gradeNames: ["flock.audioStrategy", "autoInit"],
bytesPerSample: 4, // Flocking uses Float32s, hence 4 bytes.
model: {
bytesPerBlock: {
expander: {
funcName: "flock.audioStrategy.nodejs.calcBlockBytes",
args: ["{that}.options.audioSettings", "{that}.options.bytesPerSample"]
}
}
},
members: {
speaker: "@expand:flock.audioStrategy.nodejs.createSpeaker({that}.options.audioSettings)",
outputStream: {
expander: {
funcName: "flock.audioStrategy.nodejs.createOutputStream",
args: "{that}.options.audioSettings"
}
}
},
invokers: {
start: {
funcName: "flock.audioStrategy.nodejs.startGeneratingSamples",
args: ["{that}.outputStream", "{that}.speaker", "{that}.writeSamples"]
},
stop: {
funcName: "flock.audioStrategy.nodejs.stopGeneratingSamples",
args: ["{that}.outputStream", "{that}.speaker"]
},
// TODO: De-thatify.
writeSamples: {
funcName: "flock.audioStrategy.nodejs.writeSamples",
args: ["{arguments}.0", "{that}"]
},
startReadingAudioInput: {
funcName: "flock.fail",
args: "Audio input is not currently supported on Node.js"
},
stopReadingAudioInput: "{that}.startReadingAudioInput"
}
});
flock.audioStrategy.nodejs.calcBlockBytes = function (audioSettings, bytesPerSample) {
return audioSettings.blockSize * audioSettings.chans * bytesPerSample;
};
flock.audioStrategy.nodejs.createSpeaker = function (audioSettings) {
return new Speaker({
channels: audioSettings.chans,
bitDepth: 32,
sampleRate: audioSettings.rates.audio,
signed: true,
float: true,
samplesPerFrame: audioSettings.blockSize,
endianness: "LE"
});
};
flock.audioStrategy.nodejs.createOutputStream = function () {
return new Readable();
};
flock.audioStrategy.nodejs.startGeneratingSamples = function (outputStream, speaker, writeFn) {
outputStream._read = writeFn;
outputStream.pipe(speaker);
};
flock.audioStrategy.nodejs.stopGeneratingSamples = function (outputStream, speaker) {
outputStream.unpipe(speaker);
outputStream._read = undefined;
};
flock.audioStrategy.nodejs.writeSamples = function (numBytes, that) {
var settings = that.options.audioSettings,
m = that.model,
bytesPerSample = that.options.bytesPerSample,
blockSize = settings.blockSize,
chans = settings.chans,
krPeriods = numBytes / m.bytesPerBlock,
evaluator = that.nodeEvaluator,
outputStream = that.outputStream,
out = new Buffer(numBytes);
if (numBytes < m.bytesPerBlock) {
return;
}
if (evaluator.nodes.length < 1) {
// If there are no nodes providing samples, write out silence.
flock.generate.silence(out);
} else |
outputStream.push(out);
};
fluid.demands("flock.audioStrategy.platform", "flock.platform.nodejs", {
funcName: "flock.audioStrategy.nodejs"
});
/****************************
* Web MIDI Pseudo-Polyfill *
****************************/
fluid.registerNamespace("flock.midi.nodejs");
/**
* MIDIAccess represents access to the midi system.
* @constructor
*/
flock.midi.nodejs.MIDIAccess = function (options) {
this.sysex = options.sysex !== undefined ? options.sysex : false;
this.input = new midi.input();
this.output = new midi.output();
this.input.ignoreTypes(this.sysex, false, false);
};
var p = flock.midi.nodejs.MIDIAccess.prototype = {};
p.constructor = flock.midi.nodejs.MIDIAccess;
p.inputs = function () {
return flock.midi.nodejs.getAllPorts("input", this.input);
};
p.outputs = function () {
return flock.midi.nodejs.getAllPorts("output", this.output);
};
flock.midi.nodejs.getAllPorts = function (type, midi) {
var numPorts = midi.getPortCount(),
ports = new Array(numPorts);
for (var i = 0; i < numPorts; i++) {
ports[i] = new flock.midi.nodejs.MIDIPort(type, i);
}
return ports;
};
/**
* MIDIPort represents a MIDI input or output port.
* @constructor
*/
flock.midi.nodejs.MIDIPort = function (type, portNum) {
this.type = type;
this.midi = new midi[this.type]();
this.portNum = portNum;
this.name = this.midi.getPortName(this.portNum);
this.listeners = {};
};
p = flock.midi.nodejs.MIDIPort.prototype = {};
p.constructor = flock.midi.nodejs.MIDIPort;
p.addEventListener = function (evtName, fn) {
flock.midi.nodejs.throwIfNotMIDIMessage(evtName);
this.midi.on("message", flock.midi.nodejs.wrapMessageListener(this, fn));
this.midi.openPort(this.portNum);
};
p.removeEventListener = function (evtName, fn) {
flock.midi.nodejs.throwIfNotMIDIMessage(evtName);
var listenerGUID = fn.__flock_midi_id,
wrapper = this.listeners[listenerGUID];
if (wrapper) {
this.midi.removeListener("message", wrapper);
this.listeners[listenerGUID] = undefined;
}
// TODO: Should we close the port when we have no listeners?
};
p.send = function (data) {
if (this.type !== "output") {
throw new Error("An input port can't be used to send | {
for (var i = 0, offset = 0; i < krPeriods; i++, offset += m.bytesPerBlock) {
evaluator.clearBuses();
evaluator.gen();
// Interleave each output channel.
for (var chan = 0; chan < chans; chan++) {
var bus = evaluator.buses[chan];
for (var sampIdx = 0; sampIdx < blockSize; sampIdx++) {
var frameIdx = (sampIdx * chans + chan) * bytesPerSample;
out.writeFloatLE(bus[sampIdx], offset + frameIdx);
}
}
}
} | conditional_block |
flocking-node.js | fs = require("fs"),
url = require("url"),
fluid = fluid || require("infusion"),
flock = fluid.registerNamespace("flock"),
Speaker = require("speaker"),
Readable = require("stream").Readable,
midi = require("midi");
(function () {
"use strict";
/*********************************************************
* Override default clocks with same-thread alternatives *
*********************************************************/
fluid.demands("flock.scheduler.webWorkerIntervalClock", ["flock.platform.nodejs", "flock.scheduler.async"], {
funcName: "flock.scheduler.intervalClock"
});
fluid.demands("flock.scheduler.webWorkerScheduleClock", ["flock.platform.nodejs", "flock.scheduler.async"], {
funcName: "flock.scheduler.scheduleClock"
});
/********************************************
* Override buffer loading implementations. *
********************************************/
fluid.registerNamespace("flock.file");
flock.file.readFromPath = function (options) {
var path = options.src;
fs.exists(path, function (exists) {
if (!exists && options.error) {
options.error(path + " doesn't exist.");
return;
}
fs.stat(path, function (error, stats) {
fs.open(path, "r", function (error, fd) {
var buf = new Buffer(stats.size);
fs.read(fd, buf, 0, buf.length, null, function () {
var type = flock.file.parseFileExtension(path);
var arr = new Int8Array(buf);
options.success(arr.buffer, type);
});
});
});
});
};
fluid.registerNamespace("flock.net");
flock.net.readBufferFromUrl = function () {
throw new Error("Loading files from URLs is not currently supported in Node.js.");
};
fluid.registerNamespace("flock.audio.loadBuffer");
flock.audio.loadBuffer.readerForSource = function (src) {
if (typeof (src) !== "string") {
throw new Error("Flocking error: Can't load a buffer from an unknown type of source. " +
"Only paths and URLs are currently supported on Node.js.");
}
var parsed = url.parse(src);
return parsed.protocol === "data:" ? flock.file.readBufferFromDataUrl :
!parsed.protocol ? flock.file.readFromPath : flock.net.readBufferFromUrl;
};
fluid.registerNamespace("flock.audio.decode");
// TODO: Use a stream-style interface for decoding rather than just dumping the whole job on nextTick().
flock.audio.decode.node = function (options) {
process.nextTick(function () {
flock.audio.decode.sync(options);
});
};
flock.audio.registerDecoderStrategy({
"default": flock.audio.decode.node,
"aiff": flock.audio.decode.node
});
/*********************************************
* Node.js-based Environment implementation. *
*********************************************/
fluid.defaults("flock.audioStrategy.nodejs", {
gradeNames: ["flock.audioStrategy", "autoInit"],
bytesPerSample: 4, // Flocking uses Float32s, hence 4 bytes.
model: {
bytesPerBlock: {
expander: {
funcName: "flock.audioStrategy.nodejs.calcBlockBytes",
args: ["{that}.options.audioSettings", "{that}.options.bytesPerSample"]
}
}
},
members: {
speaker: "@expand:flock.audioStrategy.nodejs.createSpeaker({that}.options.audioSettings)",
outputStream: {
expander: {
funcName: "flock.audioStrategy.nodejs.createOutputStream",
args: "{that}.options.audioSettings"
}
}
},
invokers: {
start: {
funcName: "flock.audioStrategy.nodejs.startGeneratingSamples",
args: ["{that}.outputStream", "{that}.speaker", "{that}.writeSamples"]
},
stop: {
funcName: "flock.audioStrategy.nodejs.stopGeneratingSamples", | funcName: "flock.audioStrategy.nodejs.writeSamples",
args: ["{arguments}.0", "{that}"]
},
startReadingAudioInput: {
funcName: "flock.fail",
args: "Audio input is not currently supported on Node.js"
},
stopReadingAudioInput: "{that}.startReadingAudioInput"
}
});
flock.audioStrategy.nodejs.calcBlockBytes = function (audioSettings, bytesPerSample) {
return audioSettings.blockSize * audioSettings.chans * bytesPerSample;
};
flock.audioStrategy.nodejs.createSpeaker = function (audioSettings) {
return new Speaker({
channels: audioSettings.chans,
bitDepth: 32,
sampleRate: audioSettings.rates.audio,
signed: true,
float: true,
samplesPerFrame: audioSettings.blockSize,
endianness: "LE"
});
};
flock.audioStrategy.nodejs.createOutputStream = function () {
return new Readable();
};
flock.audioStrategy.nodejs.startGeneratingSamples = function (outputStream, speaker, writeFn) {
outputStream._read = writeFn;
outputStream.pipe(speaker);
};
flock.audioStrategy.nodejs.stopGeneratingSamples = function (outputStream, speaker) {
outputStream.unpipe(speaker);
outputStream._read = undefined;
};
flock.audioStrategy.nodejs.writeSamples = function (numBytes, that) {
var settings = that.options.audioSettings,
m = that.model,
bytesPerSample = that.options.bytesPerSample,
blockSize = settings.blockSize,
chans = settings.chans,
krPeriods = numBytes / m.bytesPerBlock,
evaluator = that.nodeEvaluator,
outputStream = that.outputStream,
out = new Buffer(numBytes);
if (numBytes < m.bytesPerBlock) {
return;
}
if (evaluator.nodes.length < 1) {
// If there are no nodes providing samples, write out silence.
flock.generate.silence(out);
} else {
for (var i = 0, offset = 0; i < krPeriods; i++, offset += m.bytesPerBlock) {
evaluator.clearBuses();
evaluator.gen();
// Interleave each output channel.
for (var chan = 0; chan < chans; chan++) {
var bus = evaluator.buses[chan];
for (var sampIdx = 0; sampIdx < blockSize; sampIdx++) {
var frameIdx = (sampIdx * chans + chan) * bytesPerSample;
out.writeFloatLE(bus[sampIdx], offset + frameIdx);
}
}
}
}
outputStream.push(out);
};
fluid.demands("flock.audioStrategy.platform", "flock.platform.nodejs", {
funcName: "flock.audioStrategy.nodejs"
});
/****************************
* Web MIDI Pseudo-Polyfill *
****************************/
fluid.registerNamespace("flock.midi.nodejs");
/**
* MIDIAccess represents access to the midi system.
* @constructor
*/
flock.midi.nodejs.MIDIAccess = function (options) {
this.sysex = options.sysex !== undefined ? options.sysex : false;
this.input = new midi.input();
this.output = new midi.output();
this.input.ignoreTypes(this.sysex, false, false);
};
var p = flock.midi.nodejs.MIDIAccess.prototype = {};
p.constructor = flock.midi.nodejs.MIDIAccess;
p.inputs = function () {
return flock.midi.nodejs.getAllPorts("input", this.input);
};
p.outputs = function () {
return flock.midi.nodejs.getAllPorts("output", this.output);
};
flock.midi.nodejs.getAllPorts = function (type, midi) {
var numPorts = midi.getPortCount(),
ports = new Array(numPorts);
for (var i = 0; i < numPorts; i++) {
ports[i] = new flock.midi.nodejs.MIDIPort(type, i);
}
return ports;
};
/**
* MIDIPort represents a MIDI input or output port.
* @constructor
*/
flock.midi.nodejs.MIDIPort = function (type, portNum) {
this.type = type;
this.midi = new midi[this.type]();
this.portNum = portNum;
this.name = this.midi.getPortName(this.portNum);
this.listeners = {};
};
p = flock.midi.nodejs.MIDIPort.prototype = {};
p.constructor = flock.midi.nodejs.MIDIPort;
p.addEventListener = function (evtName, fn) {
flock.midi.nodejs.throwIfNotMIDIMessage(evtName);
this.midi.on("message", flock.midi.nodejs.wrapMessageListener(this, fn));
this.midi.openPort(this.portNum);
};
p.removeEventListener = function (evtName, fn) {
flock.midi.nodejs.throwIfNotMIDIMessage(evtName);
var listenerGUID = fn.__flock_midi_id,
wrapper = this.listeners[listenerGUID];
if (wrapper) {
this.midi.removeListener("message", wrapper);
this.listeners[listenerGUID] = undefined;
}
// TODO: Should we close the port when we have no listeners?
};
p.send = function (data) {
if (this.type !== "output") {
throw new Error("An input port can't be used to send MIDI | args: ["{that}.outputStream", "{that}.speaker"]
},
// TODO: De-thatify.
writeSamples: { | random_line_split |
app.py | levelname:<8} {message}",
style='{',
filename='logs.log',
filemode='a'
)
f = open("logs.log", "w+")
logging.info('Main application has been initialized!')
"""
Check if environment variables are set
"""
if os.environ.get("MONGO_URI") and os.environ.get("SECRET_KEY"):
MONGO_URI = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
logging.info('Environmental variables found and loaded')
else:
logging.critical('Environmental variables NOT FOUND!')
logging.critical(
'Ensure environmental variables are set before running again!')
exit()
def mongo_connect(url):
"""
Function to perform initial MongoDB connection
:type url:
:param url:
"""
try:
conn = pymongo.MongoClient(url)
logging.info('MongoDB Connected successfully!')
return conn
except pymongo.errors.ConnectionFailure as e:
logging.critical('Could not connect to MongoDB: %s', e)
"""
Initialization block: | DATABASE = "TheInterviewMasterDeck"
date_today = datetime.datetime.now()
conn = mongo_connect(MONGO_URI)
mongo_database = conn[DATABASE]
logging.info('MongoDB Server version: %s', conn.server_info()["version"])
mongo_collection = mongo_database["questions"]
index_name = 'question_1'
if index_name not in mongo_collection.index_information():
logging.info(
'MongoDB Text Search index has not yet been created... creating.')
mongo_collection.create_index(
name='question_1',
keys=[('question', TEXT)],
default_language='none'
)
else:
logging.info(
'MongoDB Text Search index has already been created... skipping.')
dblist = conn.list_database_names()
if DATABASE in dblist:
logging.info("Database 'TheInterviewMasterDeck' detected in MongoDB!")
else:
logging.critical("Database 'TheInterviewMasterDeck' NOT detected!")
logging.critical(
"Ensure you have followed \
https://github.com/patrickpulfer/code_insitute_m3#steps"
)
exit()
"""
App Routings
"""
@app.errorhandler(404)
def page_not_found(e):
"""
Return 404 if page not found
"""
return render_template("404.html"), 404
@app.errorhandler(500)
def internal_server_error(e):
"""
Return 500 if internal error
"""
return render_template("500.html"), 500
@ app.route("/")
def index():
"""
End User Index Page
"""
mongo_collection = mongo_database["settings"]
doc_instructions = mongo_collection.find_one({"id": "instructions"})
instructions = markdown.markdown(doc_instructions['text'])
return render_template("index.html", instructions=instructions)
@ app.route("/start")
def start():
"""
End User Start the Game Page
"""
mongo_collection = mongo_database["questions"]
all_cards = mongo_collection.find({"visible": "Yes"})
objects = []
for object in all_cards:
objects.append(object)
random.shuffle(objects)
return render_template("start.html", cards=objects)
@ app.route("/admin", methods=["GET", "POST"])
def admin():
"""
Admin Page function:
1. Will attempt login procedures (compare admin & password hash)
if user is not yet logged in and method is POST
2. Will display the admin console if admin is logged in
"""
settings = ''
mongo_collection = mongo_database["admin"]
if request.method == "POST" and session.get('logged_in') is None:
existing_user = mongo_collection.find_one(
{"email": request.form.get("email").lower()})
if existing_user:
if check_password_hash(
existing_user["password"], request.form.get("password")):
logging.info('Admin Login attempt successful')
flash("Welcome, {}".format(existing_user["name"]))
session["admin"] = existing_user["name"]
session["email"] = existing_user["email"]
session["logged_in"] = True
else:
logging.warning(
'Admin Login attempt failed with wrong password')
flash("Incorrect Email and/or Password")
return redirect(url_for("admin"))
else:
flash("Incorrect Email and/or Password")
logging.warning('Admin Login attempt failed with incorrect email')
return redirect(url_for("admin"))
mongo_collection = mongo_database["settings"]
settings = mongo_collection.find_one({"id": "instructions"})
return render_template(
"admin.html",
admin_logged=session.get('logged_in'),
admin_session=session,
settings=settings
)
@ app.route("/admin_logout")
def logout():
"""
Logout function
"""
flash("You have been logged out")
logging.info('Admin Logout')
session.pop("logged_in")
return redirect(url_for("admin"))
@ app.route("/admin_cards", methods=["GET", "POST"])
def admin_cards():
"""
Admin Cards Overview Page:
1. Will check if logged in, then show page
2. Will get values from database for template render
"""
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
cards = list(mongo_collection.find({"visible": "Yes"}))
cards_not_visible = list(
mongo_collection.find({"visible": {'$ne': 'Yes'}})
)
mongo_collection = mongo_database["settings"]
cards_count = mongo_collection.find_one({"id": "cards_count"})
cards_count = cards_count['integer']
return render_template(
"admin_cards.html",
cards=cards,
cards_not_visible=cards_not_visible,
cards_count=cards_count,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
else:
return admin()
@ app.route("/admin_new_card", methods=["GET", "POST"])
def admin_new_card():
"""
Question Card Creation:
1. Will check if logged in and method is POST
2. Will attempt to add the new card & update the card counter
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
new_admin_card_details = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible"),
"added_date": request.form.get("date")
}
mongo_collection.insert_one(new_admin_card_details)
flash("New Questions Card added!")
logging.info('Admin has added a new card')
mongo_collection = mongo_database["settings"]
cards_count_collection = mongo_collection.find_one(
{"id": "cards_count"})
cards_count_incrementing = cards_count_collection['integer'] + 1
mongo_collection.replace_one(
{"id": "cards_count"},
{
"id": "cards_count",
"integer": cards_count_incrementing
},
)
return redirect(url_for("admin_cards"))
else:
return admin()
@ app.route("/admin_card_update/<card_id>", methods=["GET", "POST"])
def admin_card_update(card_id):
"""
Questions Card Update Form
"""
mongo_collection = mongo_database["questions"]
card = mongo_collection.find_one({"id": card_id})
return render_template(
"admin_card_update.html",
card=card,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
@ app.route("/admin_card_update_execute/<card_id>", methods=["GET", "POST"])
def admin_card_update_execute(card_id):
"""
Questions Card Update Execution:
1. Will check if logged in and method is POST
2. Will attempt to update the Question Card accordingly
:type card_id:
:param card_id:
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
submit = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible_update"),
"added_date": request.form.get("date")
}
mongo_collection.replace_one({"_id": ObjectId(card_id)}, submit)
flash("Questions Card %s has been updated." %
request.form.get("id"))
logging.info('Questions Card %s has been updated.' %
request.form.get("id"))
return redirect(url_for("admin_cards"))
else:
return admin()
@ app.route("/admin_card_delete/<card_id>", methods=["GET", "POST"])
def admin_card_delete(card_id):
"""
Questions Card Update Form
:type card_id:
:param card_id:
"""
if request.method == "GET":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
mongo_collection.delete_one({"_id": ObjectId(card_id)})
flash("Questions Card with _id %s been deleted." % card_id)
logging.info('Questions Card with _id %s been deleted.' % card_id)
return redirect(url_for("admin_cards"))
else:
logging.info('Card deletion has been attempted without a session')
return index()
@ app.route("/instructions_update", | 1. Will connect to MongoDB using the environmental variable
2. Will create a search index if not yet created
3. Will detect if database has been setup
""" | random_line_split |
app.py | :<8} {message}",
style='{',
filename='logs.log',
filemode='a'
)
f = open("logs.log", "w+")
logging.info('Main application has been initialized!')
"""
Check if environment variables are set
"""
if os.environ.get("MONGO_URI") and os.environ.get("SECRET_KEY"):
MONGO_URI = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
logging.info('Environmental variables found and loaded')
else:
logging.critical('Environmental variables NOT FOUND!')
logging.critical(
'Ensure environmental variables are set before running again!')
exit()
def mongo_connect(url):
"""
Function to perform initial MongoDB connection
:type url:
:param url:
"""
try:
conn = pymongo.MongoClient(url)
logging.info('MongoDB Connected successfully!')
return conn
except pymongo.errors.ConnectionFailure as e:
logging.critical('Could not connect to MongoDB: %s', e)
"""
Initialization block:
1. Will connect to MongoDB using the environmental variable
2. Will create a search index if not yet created
3. Will detect if database has been setup
"""
DATABASE = "TheInterviewMasterDeck"
date_today = datetime.datetime.now()
conn = mongo_connect(MONGO_URI)
mongo_database = conn[DATABASE]
logging.info('MongoDB Server version: %s', conn.server_info()["version"])
mongo_collection = mongo_database["questions"]
index_name = 'question_1'
if index_name not in mongo_collection.index_information():
logging.info(
'MongoDB Text Search index has not yet been created... creating.')
mongo_collection.create_index(
name='question_1',
keys=[('question', TEXT)],
default_language='none'
)
else:
logging.info(
'MongoDB Text Search index has already been created... skipping.')
dblist = conn.list_database_names()
if DATABASE in dblist:
logging.info("Database 'TheInterviewMasterDeck' detected in MongoDB!")
else:
logging.critical("Database 'TheInterviewMasterDeck' NOT detected!")
logging.critical(
"Ensure you have followed \
https://github.com/patrickpulfer/code_insitute_m3#steps"
)
exit()
"""
App Routings
"""
@app.errorhandler(404)
def page_not_found(e):
"""
Return 404 if page not found
"""
return render_template("404.html"), 404
@app.errorhandler(500)
def internal_server_error(e):
"""
Return 500 if internal error
"""
return render_template("500.html"), 500
@ app.route("/")
def index():
"""
End User Index Page
"""
mongo_collection = mongo_database["settings"]
doc_instructions = mongo_collection.find_one({"id": "instructions"})
instructions = markdown.markdown(doc_instructions['text'])
return render_template("index.html", instructions=instructions)
@ app.route("/start")
def start():
"""
End User Start the Game Page
"""
mongo_collection = mongo_database["questions"]
all_cards = mongo_collection.find({"visible": "Yes"})
objects = []
for object in all_cards:
objects.append(object)
random.shuffle(objects)
return render_template("start.html", cards=objects)
@ app.route("/admin", methods=["GET", "POST"])
def admin():
"""
Admin Page function:
1. Will attempt login procedures (compare admin & password hash)
if user is not yet logged in and method is POST
2. Will display the admin console if admin is logged in
"""
settings = ''
mongo_collection = mongo_database["admin"]
if request.method == "POST" and session.get('logged_in') is None:
existing_user = mongo_collection.find_one(
{"email": request.form.get("email").lower()})
if existing_user:
if check_password_hash(
existing_user["password"], request.form.get("password")):
logging.info('Admin Login attempt successful')
flash("Welcome, {}".format(existing_user["name"]))
session["admin"] = existing_user["name"]
session["email"] = existing_user["email"]
session["logged_in"] = True
else:
logging.warning(
'Admin Login attempt failed with wrong password')
flash("Incorrect Email and/or Password")
return redirect(url_for("admin"))
else:
flash("Incorrect Email and/or Password")
logging.warning('Admin Login attempt failed with incorrect email')
return redirect(url_for("admin"))
mongo_collection = mongo_database["settings"]
settings = mongo_collection.find_one({"id": "instructions"})
return render_template(
"admin.html",
admin_logged=session.get('logged_in'),
admin_session=session,
settings=settings
)
@ app.route("/admin_logout")
def logout():
"""
Logout function
"""
flash("You have been logged out")
logging.info('Admin Logout')
session.pop("logged_in")
return redirect(url_for("admin"))
@ app.route("/admin_cards", methods=["GET", "POST"])
def admin_cards():
"""
Admin Cards Overview Page:
1. Will check if logged in, then show page
2. Will get values from database for template render
"""
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
cards = list(mongo_collection.find({"visible": "Yes"}))
cards_not_visible = list(
mongo_collection.find({"visible": {'$ne': 'Yes'}})
)
mongo_collection = mongo_database["settings"]
cards_count = mongo_collection.find_one({"id": "cards_count"})
cards_count = cards_count['integer']
return render_template(
"admin_cards.html",
cards=cards,
cards_not_visible=cards_not_visible,
cards_count=cards_count,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
else:
return admin()
@ app.route("/admin_new_card", methods=["GET", "POST"])
def admin_new_card():
"""
Question Card Creation:
1. Will check if logged in and method is POST
2. Will attempt to add the new card & update the card counter
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
new_admin_card_details = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible"),
"added_date": request.form.get("date")
}
mongo_collection.insert_one(new_admin_card_details)
flash("New Questions Card added!")
logging.info('Admin has added a new card')
mongo_collection = mongo_database["settings"]
cards_count_collection = mongo_collection.find_one(
{"id": "cards_count"})
cards_count_incrementing = cards_count_collection['integer'] + 1
mongo_collection.replace_one(
{"id": "cards_count"},
{
"id": "cards_count",
"integer": cards_count_incrementing
},
)
return redirect(url_for("admin_cards"))
else:
return admin()
@ app.route("/admin_card_update/<card_id>", methods=["GET", "POST"])
def admin_card_update(card_id):
"""
Questions Card Update Form
"""
mongo_collection = mongo_database["questions"]
card = mongo_collection.find_one({"id": card_id})
return render_template(
"admin_card_update.html",
card=card,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
@ app.route("/admin_card_update_execute/<card_id>", methods=["GET", "POST"])
def admin_card_update_execute(card_id):
"""
Questions Card Update Execution:
1. Will check if logged in and method is POST
2. Will attempt to update the Question Card accordingly
:type card_id:
:param card_id:
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
submit = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible_update"),
"added_date": request.form.get("date")
}
mongo_collection.replace_one({"_id": ObjectId(card_id)}, submit)
flash("Questions Card %s has been updated." %
request.form.get("id"))
logging.info('Questions Card %s has been updated.' %
request.form.get("id"))
return redirect(url_for("admin_cards"))
else:
|
@ app.route("/admin_card_delete/<card_id>", methods=["GET", "POST"])
def admin_card_delete(card_id):
"""
Questions Card Update Form
:type card_id:
:param card_id:
"""
if request.method == "GET":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
mongo_collection.delete_one({"_id": ObjectId(card_id)})
flash("Questions Card with _id %s been deleted." % card_id)
logging.info('Questions Card with _id %s been deleted.' % card_id)
return redirect(url_for("admin_cards"))
else:
logging.info('Card deletion has been attempted without a session')
return index()
@ app.route("/instructions_update", | return admin() | conditional_block |
app.py | levelname:<8} {message}",
style='{',
filename='logs.log',
filemode='a'
)
f = open("logs.log", "w+")
logging.info('Main application has been initialized!')
"""
Check if environment variables are set
"""
if os.environ.get("MONGO_URI") and os.environ.get("SECRET_KEY"):
MONGO_URI = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
logging.info('Environmental variables found and loaded')
else:
logging.critical('Environmental variables NOT FOUND!')
logging.critical(
'Ensure environmental variables are set before running again!')
exit()
def mongo_connect(url):
"""
Function to perform initial MongoDB connection
:type url:
:param url:
"""
try:
conn = pymongo.MongoClient(url)
logging.info('MongoDB Connected successfully!')
return conn
except pymongo.errors.ConnectionFailure as e:
logging.critical('Could not connect to MongoDB: %s', e)
"""
Initialization block:
1. Will connect to MongoDB using the environmental variable
2. Will create a search index if not yet created
3. Will detect if database has been setup
"""
DATABASE = "TheInterviewMasterDeck"
date_today = datetime.datetime.now()
conn = mongo_connect(MONGO_URI)
mongo_database = conn[DATABASE]
logging.info('MongoDB Server version: %s', conn.server_info()["version"])
mongo_collection = mongo_database["questions"]
index_name = 'question_1'
if index_name not in mongo_collection.index_information():
logging.info(
'MongoDB Text Search index has not yet been created... creating.')
mongo_collection.create_index(
name='question_1',
keys=[('question', TEXT)],
default_language='none'
)
else:
logging.info(
'MongoDB Text Search index has already been created... skipping.')
dblist = conn.list_database_names()
if DATABASE in dblist:
logging.info("Database 'TheInterviewMasterDeck' detected in MongoDB!")
else:
logging.critical("Database 'TheInterviewMasterDeck' NOT detected!")
logging.critical(
"Ensure you have followed \
https://github.com/patrickpulfer/code_insitute_m3#steps"
)
exit()
"""
App Routings
"""
@app.errorhandler(404)
def page_not_found(e):
"""
Return 404 if page not found
"""
return render_template("404.html"), 404
@app.errorhandler(500)
def internal_server_error(e):
"""
Return 500 if internal error
"""
return render_template("500.html"), 500
@ app.route("/")
def index():
"""
End User Index Page
"""
mongo_collection = mongo_database["settings"]
doc_instructions = mongo_collection.find_one({"id": "instructions"})
instructions = markdown.markdown(doc_instructions['text'])
return render_template("index.html", instructions=instructions)
@ app.route("/start")
def start():
"""
End User Start the Game Page
"""
mongo_collection = mongo_database["questions"]
all_cards = mongo_collection.find({"visible": "Yes"})
objects = []
for object in all_cards:
objects.append(object)
random.shuffle(objects)
return render_template("start.html", cards=objects)
@ app.route("/admin", methods=["GET", "POST"])
def admin():
"""
Admin Page function:
1. Will attempt login procedures (compare admin & password hash)
if user is not yet logged in and method is POST
2. Will display the admin console if admin is logged in
"""
settings = ''
mongo_collection = mongo_database["admin"]
if request.method == "POST" and session.get('logged_in') is None:
existing_user = mongo_collection.find_one(
{"email": request.form.get("email").lower()})
if existing_user:
if check_password_hash(
existing_user["password"], request.form.get("password")):
logging.info('Admin Login attempt successful')
flash("Welcome, {}".format(existing_user["name"]))
session["admin"] = existing_user["name"]
session["email"] = existing_user["email"]
session["logged_in"] = True
else:
logging.warning(
'Admin Login attempt failed with wrong password')
flash("Incorrect Email and/or Password")
return redirect(url_for("admin"))
else:
flash("Incorrect Email and/or Password")
logging.warning('Admin Login attempt failed with incorrect email')
return redirect(url_for("admin"))
mongo_collection = mongo_database["settings"]
settings = mongo_collection.find_one({"id": "instructions"})
return render_template(
"admin.html",
admin_logged=session.get('logged_in'),
admin_session=session,
settings=settings
)
@ app.route("/admin_logout")
def logout():
"""
Logout function
"""
flash("You have been logged out")
logging.info('Admin Logout')
session.pop("logged_in")
return redirect(url_for("admin"))
@ app.route("/admin_cards", methods=["GET", "POST"])
def admin_cards():
"""
Admin Cards Overview Page:
1. Will check if logged in, then show page
2. Will get values from database for template render
"""
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
cards = list(mongo_collection.find({"visible": "Yes"}))
cards_not_visible = list(
mongo_collection.find({"visible": {'$ne': 'Yes'}})
)
mongo_collection = mongo_database["settings"]
cards_count = mongo_collection.find_one({"id": "cards_count"})
cards_count = cards_count['integer']
return render_template(
"admin_cards.html",
cards=cards,
cards_not_visible=cards_not_visible,
cards_count=cards_count,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
else:
return admin()
@ app.route("/admin_new_card", methods=["GET", "POST"])
def admin_new_card():
"""
Question Card Creation:
1. Will check if logged in and method is POST
2. Will attempt to add the new card & update the card counter
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
new_admin_card_details = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible"),
"added_date": request.form.get("date")
}
mongo_collection.insert_one(new_admin_card_details)
flash("New Questions Card added!")
logging.info('Admin has added a new card')
mongo_collection = mongo_database["settings"]
cards_count_collection = mongo_collection.find_one(
{"id": "cards_count"})
cards_count_incrementing = cards_count_collection['integer'] + 1
mongo_collection.replace_one(
{"id": "cards_count"},
{
"id": "cards_count",
"integer": cards_count_incrementing
},
)
return redirect(url_for("admin_cards"))
else:
return admin()
@ app.route("/admin_card_update/<card_id>", methods=["GET", "POST"])
def admin_card_update(card_id):
"""
Questions Card Update Form
"""
mongo_collection = mongo_database["questions"]
card = mongo_collection.find_one({"id": card_id})
return render_template(
"admin_card_update.html",
card=card,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
@ app.route("/admin_card_update_execute/<card_id>", methods=["GET", "POST"])
def admin_card_update_execute(card_id):
| request.form.get("id"))
logging.info('Questions Card %s has been updated.' %
request.form.get("id"))
return redirect(url_for("admin_cards"))
else:
return admin()
@ app.route("/admin_card_delete/<card_id>", methods=["GET", "POST"])
def admin_card_delete(card_id):
"""
Questions Card Update Form
:type card_id:
:param card_id:
"""
if request.method == "GET":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
mongo_collection.delete_one({"_id": ObjectId(card_id)})
flash("Questions Card with _id %s been deleted." % card_id)
logging.info('Questions Card with _id %s been deleted.' % card_id)
return redirect(url_for("admin_cards"))
else:
logging.info('Card deletion has been attempted without a session')
return index()
@ app.route("/instructions_update", | """
Questions Card Update Execution:
1. Will check if logged in and method is POST
2. Will attempt to update the Question Card accordingly
:type card_id:
:param card_id:
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
submit = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible_update"),
"added_date": request.form.get("date")
}
mongo_collection.replace_one({"_id": ObjectId(card_id)}, submit)
flash("Questions Card %s has been updated." % | identifier_body |
app.py | :<8} {message}",
style='{',
filename='logs.log',
filemode='a'
)
f = open("logs.log", "w+")
logging.info('Main application has been initialized!')
"""
Check if environment variables are set
"""
if os.environ.get("MONGO_URI") and os.environ.get("SECRET_KEY"):
MONGO_URI = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
logging.info('Environmental variables found and loaded')
else:
logging.critical('Environmental variables NOT FOUND!')
logging.critical(
'Ensure environmental variables are set before running again!')
exit()
def mongo_connect(url):
"""
Function to perform initial MongoDB connection
:type url:
:param url:
"""
try:
conn = pymongo.MongoClient(url)
logging.info('MongoDB Connected successfully!')
return conn
except pymongo.errors.ConnectionFailure as e:
logging.critical('Could not connect to MongoDB: %s', e)
"""
Initialization block:
1. Will connect to MongoDB using the environmental variable
2. Will create a search index if not yet created
3. Will detect if database has been setup
"""
DATABASE = "TheInterviewMasterDeck"
date_today = datetime.datetime.now()
conn = mongo_connect(MONGO_URI)
mongo_database = conn[DATABASE]
logging.info('MongoDB Server version: %s', conn.server_info()["version"])
mongo_collection = mongo_database["questions"]
index_name = 'question_1'
if index_name not in mongo_collection.index_information():
logging.info(
'MongoDB Text Search index has not yet been created... creating.')
mongo_collection.create_index(
name='question_1',
keys=[('question', TEXT)],
default_language='none'
)
else:
logging.info(
'MongoDB Text Search index has already been created... skipping.')
dblist = conn.list_database_names()
if DATABASE in dblist:
logging.info("Database 'TheInterviewMasterDeck' detected in MongoDB!")
else:
logging.critical("Database 'TheInterviewMasterDeck' NOT detected!")
logging.critical(
"Ensure you have followed \
https://github.com/patrickpulfer/code_insitute_m3#steps"
)
exit()
"""
App Routings
"""
@app.errorhandler(404)
def page_not_found(e):
"""
Return 404 if page not found
"""
return render_template("404.html"), 404
@app.errorhandler(500)
def internal_server_error(e):
"""
Return 500 if internal error
"""
return render_template("500.html"), 500
@ app.route("/")
def | ():
"""
End User Index Page
"""
mongo_collection = mongo_database["settings"]
doc_instructions = mongo_collection.find_one({"id": "instructions"})
instructions = markdown.markdown(doc_instructions['text'])
return render_template("index.html", instructions=instructions)
@ app.route("/start")
def start():
"""
End User Start the Game Page
"""
mongo_collection = mongo_database["questions"]
all_cards = mongo_collection.find({"visible": "Yes"})
objects = []
for object in all_cards:
objects.append(object)
random.shuffle(objects)
return render_template("start.html", cards=objects)
@ app.route("/admin", methods=["GET", "POST"])
def admin():
"""
Admin Page function:
1. Will attempt login procedures (compare admin & password hash)
if user is not yet logged in and method is POST
2. Will display the admin console if admin is logged in
"""
settings = ''
mongo_collection = mongo_database["admin"]
if request.method == "POST" and session.get('logged_in') is None:
existing_user = mongo_collection.find_one(
{"email": request.form.get("email").lower()})
if existing_user:
if check_password_hash(
existing_user["password"], request.form.get("password")):
logging.info('Admin Login attempt successful')
flash("Welcome, {}".format(existing_user["name"]))
session["admin"] = existing_user["name"]
session["email"] = existing_user["email"]
session["logged_in"] = True
else:
logging.warning(
'Admin Login attempt failed with wrong password')
flash("Incorrect Email and/or Password")
return redirect(url_for("admin"))
else:
flash("Incorrect Email and/or Password")
logging.warning('Admin Login attempt failed with incorrect email')
return redirect(url_for("admin"))
mongo_collection = mongo_database["settings"]
settings = mongo_collection.find_one({"id": "instructions"})
return render_template(
"admin.html",
admin_logged=session.get('logged_in'),
admin_session=session,
settings=settings
)
@ app.route("/admin_logout")
def logout():
"""
Logout function
"""
flash("You have been logged out")
logging.info('Admin Logout')
session.pop("logged_in")
return redirect(url_for("admin"))
@ app.route("/admin_cards", methods=["GET", "POST"])
def admin_cards():
"""
Admin Cards Overview Page:
1. Will check if logged in, then show page
2. Will get values from database for template render
"""
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
cards = list(mongo_collection.find({"visible": "Yes"}))
cards_not_visible = list(
mongo_collection.find({"visible": {'$ne': 'Yes'}})
)
mongo_collection = mongo_database["settings"]
cards_count = mongo_collection.find_one({"id": "cards_count"})
cards_count = cards_count['integer']
return render_template(
"admin_cards.html",
cards=cards,
cards_not_visible=cards_not_visible,
cards_count=cards_count,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
else:
return admin()
@ app.route("/admin_new_card", methods=["GET", "POST"])
def admin_new_card():
"""
Question Card Creation:
1. Will check if logged in and method is POST
2. Will attempt to add the new card & update the card counter
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
new_admin_card_details = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible"),
"added_date": request.form.get("date")
}
mongo_collection.insert_one(new_admin_card_details)
flash("New Questions Card added!")
logging.info('Admin has added a new card')
mongo_collection = mongo_database["settings"]
cards_count_collection = mongo_collection.find_one(
{"id": "cards_count"})
cards_count_incrementing = cards_count_collection['integer'] + 1
mongo_collection.replace_one(
{"id": "cards_count"},
{
"id": "cards_count",
"integer": cards_count_incrementing
},
)
return redirect(url_for("admin_cards"))
else:
return admin()
@ app.route("/admin_card_update/<card_id>", methods=["GET", "POST"])
def admin_card_update(card_id):
"""
Questions Card Update Form
"""
mongo_collection = mongo_database["questions"]
card = mongo_collection.find_one({"id": card_id})
return render_template(
"admin_card_update.html",
card=card,
datetime=date_today.strftime("%x"),
admin_logged=session.get('logged_in'),
admin_session=session
)
@ app.route("/admin_card_update_execute/<card_id>", methods=["GET", "POST"])
def admin_card_update_execute(card_id):
"""
Questions Card Update Execution:
1. Will check if logged in and method is POST
2. Will attempt to update the Question Card accordingly
:type card_id:
:param card_id:
"""
if request.method == "POST":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
submit = {
"id": request.form.get("id"),
"question": request.form.get("question"),
"tip": request.form.get("tip"),
"visible": request.form.get("visible_update"),
"added_date": request.form.get("date")
}
mongo_collection.replace_one({"_id": ObjectId(card_id)}, submit)
flash("Questions Card %s has been updated." %
request.form.get("id"))
logging.info('Questions Card %s has been updated.' %
request.form.get("id"))
return redirect(url_for("admin_cards"))
else:
return admin()
@ app.route("/admin_card_delete/<card_id>", methods=["GET", "POST"])
def admin_card_delete(card_id):
"""
Questions Card Update Form
:type card_id:
:param card_id:
"""
if request.method == "GET":
if session.get('logged_in') is True:
mongo_collection = mongo_database["questions"]
mongo_collection.delete_one({"_id": ObjectId(card_id)})
flash("Questions Card with _id %s been deleted." % card_id)
logging.info('Questions Card with _id %s been deleted.' % card_id)
return redirect(url_for("admin_cards"))
else:
logging.info('Card deletion has been attempted without a session')
return index()
@ app.route("/instructions_update", | index | identifier_name |
main.rs | stack: vec![],
output: "".to_string(),
}
}
}
enum Msg {
Input(String),
Interval(String),
Toggle,
Step,
Reset,
Tick,
}
fn update(context: &mut Context<Msg>, model: &mut Model, msg: Msg) {
match msg {
Msg::Input(input) => {
// model.befunge.source = string_to_array(input.as_str());
model.input = input;
},
Msg::Interval(interval) => {
model.interval = interval;
},
Msg::Toggle => {
match model.befunge.mode {
Mode::End => model.befunge = init_befunge(model),
_ => model.befunge.running = !model.befunge.running,
}
if model.befunge.running {
context.timeout(Duration::from_millis(0), || Msg::Tick);
}
match model.befunge.mode {
Mode::End => model.time = 0,
_ => (),
}
},
Msg::Reset => {
model.befunge = Befunge {
cursor: (0, 0),
direction: Direction::Right,
stack: vec![],
output: "".to_string(),
running: false,
source: string_to_array(model.input.as_str()),
mode: Mode::End,
};
model.time = 0;
},
Msg::Tick => {
if model.befunge.running {
let frame = (1.0 / model.interval
.parse()
.unwrap_or(DEFAULT_INTERVAL)
.max(0.0001).min(1.0))
.round() as usize;
for _ in 0..frame {
process(&mut model.befunge)
}
model.time += frame as u64;
let ms = model.interval
.parse()
.unwrap_or(DEFAULT_INTERVAL as u64)
.max(0).min(5000);
context.timeout(Duration::from_millis(ms), || Msg::Tick);
}
},
Msg::Step => {
match model.befunge.mode {
Mode::End => model.befunge = init_befunge(model),
_ => (),
}
model.befunge.running = false;
model.time += 1;
process(&mut model.befunge);
},
}
}
fn init_befunge(model: &Model) -> Befunge {
Befunge {
cursor: (-1, 0),
direction: Direction::Right,
stack: vec![],
output: "".to_string(),
running: true,
source: string_to_array(model.input.as_str()),
mode: Mode::None,
}
}
fn string_to_array(source: &str) -> Array2d<char> {
source.split("\n").map( |v|
v.chars().collect()
).collect()
}
fn cyclic_index<T>(a: &Vec<T>, i: i64) -> Option<i64> {
let l = a.len() as i64;
if l == 0 { None } else { Some(((i % l) + l) % l) } // Euclidean mod so negative cursor positions wrap
}
fn cyclic_index2d<T>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<(i64, i64)> {
let (x, y) = cursor;
let cy = cyclic_index(&a, y);
let cx = cy
.and_then( |cy_| a.get(cy_ as usize) )
.and_then( |row| cyclic_index(row, x) );
cx.and_then( |cx_| cy.map( |cy_| (cx_, cy_) ) )
}
fn get2d<T: Clone>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<T> {
let (x, y) = cursor;
a.get(y as usize)
.and_then( |row| row.get(x as usize) )
.cloned()
}
fn set2d<T>(cursor: (i64, i64), v: T, a: &mut Array2d<T>) {
let (x, y) = cursor;
a[y as usize][x as usize] = v;
}
// fn indexed_map2d<T, S, F: Fn((i64, i64), &T) -> S>(f: F, a: &Array2d<T>) -> Array2d<S> {
// a.iter().enumerate().map( |(y, row)|
// row.iter().enumerate().map( |(x, c)| f((x as i64, y as i64), c)).collect()
// ).collect()
// }
fn walk_next<T>(a: &Array2d<T>, direction: &Direction, cursor: (i64, i64)) -> (i64, i64) {
let (x, y) = cursor;
let cursor_candidate = match *direction {
Direction::Left => (x - 1, y),
Direction::Right => (x + 1, y),
Direction::Up => (x, y - 1),
Direction::Down => (x, y + 1),
};
cyclic_index2d(&a, cursor_candidate).unwrap_or((0, 0))
}
fn process(b: &mut Befunge) {
let cursor = walk_next(&b.source, &b.direction, b.cursor);
let cell = get2d(&b.source, cursor).unwrap_or(' ');
match b.mode {
Mode::End => (),
Mode::StringMode => {
b.cursor = cursor;
if cell != '"' {
b.stack.push(cell as i64);
} else {
commands(cell, cursor, b);
}
},
Mode::None => {
b.cursor = cursor;
commands(cell, cursor, b);
}
}
}
fn calc<F: Fn(i64, i64) -> i64>(s: &mut Stack, f: F) {
let y = s.pop().unwrap_or(0);
let x = s.pop().unwrap_or(0);
s.push(f(x, y));
}
fn commands(cell: char, cursor: (i64, i64), b: &mut Befunge) {
match cell {
'<' => b.direction = Direction::Left,
'>' => b.direction = Direction::Right,
'^' => b.direction = Direction::Up,
'v' => b.direction = Direction::Down,
' ' => (),
'_' => {
let v = b.stack.pop().unwrap_or(0);
b.direction = if v == 0 { Direction::Right } else { Direction::Left };
},
'|' => {
let v = b.stack.pop().unwrap_or(0);
b.direction = if v == 0 { Direction::Down } else { Direction::Up };
},
'#' => b.cursor = walk_next(&b.source, &b.direction, cursor),
'@' => {
b.running = false;
b.mode = Mode::End;
},
'0' => b.stack.push(0),
'1' => b.stack.push(1),
'2' => b.stack.push(2),
'3' => b.stack.push(3),
'4' => b.stack.push(4),
'5' => b.stack.push(5),
'6' => b.stack.push(6),
'7' => b.stack.push(7),
'8' => b.stack.push(8),
'9' => b.stack.push(9),
'"' => b.mode = match b.mode {
Mode::StringMode => Mode::None,
_ => Mode::StringMode,
},
'.' => {
let v = b.stack.pop().unwrap_or(0);
b.output = format!("{}{} ", b.output, v);
},
',' => {
let v = b.stack.pop().unwrap_or(0);
b.output = format!("{}{}", b.output,
char::from_u32(v as u32).unwrap_or(' ')
);
},
'+' => calc( &mut b.stack, |x, y| x + y ),
'-' => calc( &mut b.stack, |x, y| x - y ),
'*' => calc( &mut b.stack, |x, y| x * y ),
'/' => calc( &mut b.stack, |x, y| x / y ),
'%' => calc( &mut b.stack, |x, y| x % y ),
'`' => calc( &mut b.stack, |x, y| if x > y { 1 } else { 0 } ),
'!' => {
let v = b.stack.pop().unwrap_or(0);
b.stack.push(if v == 0 { 1 } else { 0 });
},
':' => {
let v = b.stack.pop().unwrap_or(0);
b.stack.push(v);
b.stack.push(v);
},
'\\' => {
let y = b.stack.pop().unwrap_or(0);
let x = b.stack.pop().unwrap_or(0);
b.stack.push(y);
b.stack.push(x);
},
'$' => {
b.stack.pop();
},
'g' => {
let y = b.stack.pop().unwrap_or(0);
let x = b.stack | running: false,
mode: Mode::End, | random_line_split |
|
main.rs | ) -> Array2d<char> {
source.split("\n").map( |v|
v.chars().collect()
).collect()
}
fn cyclic_index<T>(a: &Vec<T>, i: i64) -> Option<i64> {
let l = a.len() as i64;
if l == 0 { None } else { Some(((i % l) + l) % l) } // Euclidean mod so negative cursor positions wrap
}
fn cyclic_index2d<T>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<(i64, i64)> {
let (x, y) = cursor;
let cy = cyclic_index(&a, y);
let cx = cy
.and_then( |cy_| a.get(cy_ as usize) )
.and_then( |row| cyclic_index(row, x) );
cx.and_then( |cx_| cy.map( |cy_| (cx_, cy_) ) )
}
fn get2d<T: Clone>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<T> {
let (x, y) = cursor;
a.get(y as usize)
.and_then( |row| row.get(x as usize) )
.cloned()
}
fn set2d<T>(cursor: (i64, i64), v: T, a: &mut Array2d<T>) {
let (x, y) = cursor;
a[y as usize][x as usize] = v;
}
// fn indexed_map2d<T, S, F: Fn((i64, i64), &T) -> S>(f: F, a: &Array2d<T>) -> Array2d<S> {
// a.iter().enumerate().map( |(y, row)|
// row.iter().enumerate().map( |(x, c)| f((x as i64, y as i64), c)).collect()
// ).collect()
// }
fn walk_next<T>(a: &Array2d<T>, direction: &Direction, cursor: (i64, i64)) -> (i64, i64) {
let (x, y) = cursor;
let cursor_candidate = match *direction {
Direction::Left => (x - 1, y),
Direction::Right => (x + 1, y),
Direction::Up => (x, y - 1),
Direction::Down => (x, y + 1),
};
cyclic_index2d(&a, cursor_candidate).unwrap_or((0, 0))
}
fn process(b: &mut Befunge) {
let cursor = walk_next(&b.source, &b.direction, b.cursor);
let cell = get2d(&b.source, cursor).unwrap_or(' ');
match b.mode {
Mode::End => (),
Mode::StringMode => {
b.cursor = cursor;
if cell != '"' {
b.stack.push(cell as i64);
} else {
commands(cell, cursor, b);
}
},
Mode::None => {
b.cursor = cursor;
commands(cell, cursor, b);
}
}
}
fn calc<F: Fn(i64, i64) -> i64>(s: &mut Stack, f: F) {
let y = s.pop().unwrap_or(0);
let x = s.pop().unwrap_or(0);
s.push(f(x, y));
}
fn commands(cell: char, cursor: (i64, i64), b: &mut Befunge) {
match cell {
'<' => b.direction = Direction::Left,
'>' => b.direction = Direction::Right,
'^' => b.direction = Direction::Up,
'v' => b.direction = Direction::Down,
' ' => (),
'_' => {
let v = b.stack.pop().unwrap_or(0);
b.direction = if v == 0 { Direction::Right } else { Direction::Left };
},
'|' => {
let v = b.stack.pop().unwrap_or(0);
b.direction = if v == 0 { Direction::Down } else { Direction::Up };
},
'#' => b.cursor = walk_next(&b.source, &b.direction, cursor),
'@' => {
b.running = false;
b.mode = Mode::End;
},
'0' => b.stack.push(0),
'1' => b.stack.push(1),
'2' => b.stack.push(2),
'3' => b.stack.push(3),
'4' => b.stack.push(4),
'5' => b.stack.push(5),
'6' => b.stack.push(6),
'7' => b.stack.push(7),
'8' => b.stack.push(8),
'9' => b.stack.push(9),
'"' => b.mode = match b.mode {
Mode::StringMode => Mode::None,
_ => Mode::StringMode,
},
'.' => {
let v = b.stack.pop().unwrap_or(0);
b.output = format!("{}{} ", b.output, v);
},
',' => {
let v = b.stack.pop().unwrap_or(0);
b.output = format!("{}{}", b.output,
char::from_u32(v as u32).unwrap_or(' ')
);
},
'+' => calc( &mut b.stack, |x, y| x + y ),
'-' => calc( &mut b.stack, |x, y| x - y ),
'*' => calc( &mut b.stack, |x, y| x * y ),
'/' => calc( &mut b.stack, |x, y| x / y ),
'%' => calc( &mut b.stack, |x, y| x % y ),
'`' => calc( &mut b.stack, |x, y| if x > y { 1 } else { 0 } ),
'!' => {
let v = b.stack.pop().unwrap_or(0);
b.stack.push(if v == 0 { 1 } else { 0 });
},
':' => {
let v = b.stack.pop().unwrap_or(0);
b.stack.push(v);
b.stack.push(v);
},
'\\' => {
let y = b.stack.pop().unwrap_or(0);
let x = b.stack.pop().unwrap_or(0);
b.stack.push(y);
b.stack.push(x);
},
'$' => {
b.stack.pop();
},
'g' => {
let y = b.stack.pop().unwrap_or(0);
let x = b.stack.pop().unwrap_or(0);
let c = get2d(&b.source, (x, y))
.map( |v| v as i64 )
.unwrap_or(0);
b.stack.push(c);
},
'p' => {
let y = b.stack.pop().unwrap_or(0);
let x = b.stack.pop().unwrap_or(0);
let v = b.stack.pop().unwrap_or(0);
set2d((x, y), char::from_u32(v as u32).unwrap_or(' '), &mut b.source);
},
_ => (),
}
}
fn view(model: &Model) -> Html<Msg> {
html! {
<div class="main", >
<h1 class="title", >
{ "Befunge" }
<span class="note", >{ "Yew (Rust wasm32-unknown-emscripten)" }</span>
</h1>
<div>
<textarea
class="text",
type="text",
oninput=|e: InputData| Msg::Input(e.value),
value=&model.input,
placeholder="This textarea will not work! Sorry :(",
rows=10,
cols=80, />
</div>
<input
class="text",
type="text",
oninput=|e: InputData| Msg::Interval(e.value),
value=&model.interval, />
<input
class="button",
type="button",
onclick=|_| Msg::Toggle,
value=&if model.befunge.running { "stop" } else { "run" }, />
<input class="button", type="button", onclick=|_| Msg::Step, value=&"step", />
<input class="button", type="button", onclick=|_| Msg::Reset, value=&"reset", />
<div>
<div class="text", >
{ colorize(&model.befunge.source, model.befunge.cursor) }
</div>
</div>
<div>
<div class="text", >
{ model.befunge.stack.iter().map( |v| format!("{}", v) ).collect::<Vec<_>>().join(" ") }
</div>
</div>
<div>
<pre class="text", >
{ &model.befunge.output }
</pre>
</div>
<div>{ format!("{}", model.time) }</div>
<div>
<a
class="footer",
href="https://github.com/pnlybubbles/yew-befunge",
target="_blank", >
{ "source "}
</a>
</div>
</div>
}
}
fn | fix_char_width | identifier_name |
|
main.rs |
struct Model {
input: String,
interval: String,
time: u64,
befunge: Befunge,
}
struct Befunge {
source: Array2d<char>,
cursor: (i64, i64),
direction: Direction,
running: bool,
mode: Mode,
stack: Stack,
output: String
}
type Stack = Vec<i64>;
#[derive(Debug)]
enum Mode { StringMode, End, None }
#[derive(Debug)]
enum Direction { Up, Down, Left, Right }
type Array2d<T> = Vec<Vec<T>>;
const DEFAULT_INTERVAL: f64 = 200.0;
const DEFAULT_INPUT: &str = "2>:1->1-00p::00g: v
v%-g00::_v#!`\\-_$$.v
^g00_ v
^+1 <
> :.^";
fn init_model() -> Model {
Model {
input: DEFAULT_INPUT.to_string(),
interval: format!("{}", DEFAULT_INTERVAL),
time: 0,
befunge: Befunge {
source: string_to_array(DEFAULT_INPUT),
cursor: (0, 0),
direction: Direction::Right,
running: false,
mode: Mode::End,
stack: vec![],
output: "".to_string(),
}
}
}
enum Msg {
Input(String),
Interval(String),
Toggle,
Step,
Reset,
Tick,
}
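// Elm-style message loop: every UI event becomes a `Msg`, and `update`
// mutates the model in response. `Tick` re-arms itself through
// `context.timeout` for as long as the interpreter keeps running.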
fn update(context: &mut Context<Msg>, model: &mut Model, msg: Msg) {
match msg {
Msg::Input(input) => {
// model.befunge.source = string_to_array(input.as_str());
model.input = input;
},
Msg::Interval(interval) => {
model.interval = interval;
},
Msg::Toggle => {
match model.befunge.mode {
Mode::End => model.befunge = init_befunge(model),
_ => model.befunge.running = !model.befunge.running,
}
if model.befunge.running {
context.timeout(Duration::from_millis(0), || Msg::Tick);
}
match model.befunge.mode {
Mode::End => model.time = 0,
_ => (),
}
},
Msg::Reset => {
model.befunge = Befunge {
cursor: (0, 0),
direction: Direction::Right,
stack: vec![],
output: "".to_string(),
running: false,
source: string_to_array(model.input.as_str()),
mode: Mode::End,
};
model.time = 0;
},
Msg::Tick => {
if model.befunge.running {
let frame = (1.0 / model.interval
.parse()
.unwrap_or(DEFAULT_INTERVAL)
.max(0.0001).min(1.0))
.round() as usize;
for _ in 0..frame {
process(&mut model.befunge)
}
model.time += frame as u64;
let ms = model.interval
.parse()
.unwrap_or(DEFAULT_INTERVAL as u64)
.max(0).min(5000);
context.timeout(Duration::from_millis(ms), || Msg::Tick);
}
},
Msg::Step => {
match model.befunge.mode {
Mode::End => model.befunge = init_befunge(model),
_ => (),
}
model.befunge.running = false;
model.time += 1;
process(&mut model.befunge);
},
}
}
fn init_befunge(model: &Model) -> Befunge {
Befunge {
cursor: (-1, 0),
direction: Direction::Right,
stack: vec![],
output: "".to_string(),
running: true,
source: string_to_array(model.input.as_str()),
mode: Mode::None,
}
}
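// The cursor starts at (-1, 0) so that the first `walk_next` in `process`
// lands on (0, 0), the first cell of the program.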
fn string_to_array(source: &str) -> Array2d<char> {
source.split("\n").map( |v|
v.chars().collect()
).collect()
}
fn cyclic_index<T>(a: &Vec<T>, i: i64) -> Option<i64> {
let l = a.len() as i64;
// rem_euclid keeps the result non-negative, so walking left/up past the
// edge wraps to the far side instead of producing a negative index.
if l == 0 { None } else { Some(i.rem_euclid(l)) }
}
fn cyclic_index2d<T>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<(i64, i64)> {
let (x, y) = cursor;
let cy = cyclic_index(&a, y);
let cx = cy
.and_then( |cy_| a.get(cy_ as usize) )
.and_then( |row| cyclic_index(row, x) );
cx.and_then( |cx_| cy.map( |cy_| (cx_, cy_) ) )
}
fn get2d<T: Clone>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<T> {
let (x, y) = cursor;
a.get(y as usize)
.and_then( |row| row.get(x as usize) )
.cloned()
}
fn set2d<T>(cursor: (i64, i64), v: T, a: &mut Array2d<T>) {
let (x, y) = cursor;
a[y as usize][x as usize] = v;
}
// fn indexed_map2d<T, S, F: Fn((i64, i64), &T) -> S>(f: F, a: &Array2d<T>) -> Array2d<S> {
// a.iter().enumerate().map( |(y, row)|
// row.iter().enumerate().map( |(x, c)| f((x as i64, y as i64), c)).collect()
// ).collect()
// }
fn walk_next<T>(a: &Array2d<T>, direction: &Direction, cursor: (i64, i64)) -> (i64, i64) {
let (x, y) = cursor;
let cursor_candidate = match *direction {
Direction::Left => (x - 1, y),
Direction::Right => (x + 1, y),
Direction::Up => (x, y - 1),
Direction::Down => (x, y + 1),
};
cyclic_index2d(&a, cursor_candidate).unwrap_or((0, 0))
}
fn process(b: &mut Befunge) {
let cursor = walk_next(&b.source, &b.direction, b.cursor);
let cell = get2d(&b.source, cursor).unwrap_or(' ');
match b.mode {
Mode::End => (),
Mode::StringMode => {
b.cursor = cursor;
if cell != '"' {
b.stack.push(cell as i64);
} else {
commands(cell, cursor, b);
}
},
Mode::None => {
b.cursor = cursor;
commands(cell, cursor, b);
}
}
}
fn calc<F: Fn(i64, i64) -> i64>(s: &mut Stack, f: F) {
let y = s.pop().unwrap_or(0);
let x = s.pop().unwrap_or(0);
s.push(f(x, y));
}
fn commands(cell: char, cursor: (i64, i64), b: &mut Befunge) {
match cell {
'<' => b.direction = Direction::Left,
'>' => b.direction = Direction::Right,
'^' => b.direction = Direction::Up,
'v' => b.direction = Direction::Down,
' ' => (),
'_' => {
let v = b.stack.pop().unwrap_or(0);
b.direction = if v == 0 { Direction::Right } else { Direction::Left };
},
'|' => {
let v = b.stack.pop().unwrap_or(0);
b.direction = if v == 0 { Direction::Down } else { Direction::Up };
},
'#' => b.cursor = walk_next(&b.source, &b.direction, cursor),
'@' => {
b.running = false;
b.mode = Mode::End;
},
'0' => b.stack.push(0),
'1' => b.stack.push(1),
'2' => b.stack.push(2),
'3' => b.stack.push(3),
'4' => b.stack.push(4),
'5' => b.stack.push(5),
'6' => b.stack.push(6),
'7' => b.stack.push(7),
'8' => b.stack.push(8),
'9' => b.stack.push(9),
'"' => b.mode = match b.mode {
Mode::StringMode => Mode::None,
_ => Mode::StringMode,
},
'.' => {
let v = b.stack.pop().unwrap_or(0);
b.output = format!("{}{} ", b.output, v);
},
',' => {
let v = b.stack.pop().unwrap_or(0);
b.output = format!("{}{}", b.output,
char::from_u3 |
{
let model = init_model();
program(model, update, view);
}
| identifier_body |
|
section.rs |
use crate::val_helpr::ValidationResult;
use crate::{compose::text, elems::BlockElement};
/// # Section Block
///
/// _[slack api docs 🔗]_
///
/// Available in surfaces:
/// - [modals 🔗]
/// - [messages 🔗]
/// - [home tabs 🔗]
///
/// A `section` is one of the most flexible blocks available -
/// it can be used as a simple text block,
/// in combination with text fields,
/// or side-by-side with any of the available [block elements 🔗]
///
/// [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section
/// [modals 🔗]: https://api.slack.com/surfaces/modals
/// [messages 🔗]: https://api.slack.com/surfaces/messages
/// [home tabs 🔗]: https://api.slack.com/surfaces/tabs
/// [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements
#[derive(Clone, Debug, Deserialize, Hash, PartialEq, Serialize)]
#[cfg_attr(feature = "validation", derive(Validate))]
pub struct Section<'a> {
#[serde(skip_serializing_if = "Option::is_none")]
#[cfg_attr(feature = "validation", validate(custom = "validate::fields"))]
fields: Option<Cow<'a, [text::Text]>>,
#[serde(skip_serializing_if = "Option::is_none")]
#[cfg_attr(feature = "validation", validate(custom = "validate::text"))]
text: Option<text::Text>,
#[serde(skip_serializing_if = "Option::is_none")]
#[cfg_attr(feature = "validation", validate(custom = "validate::block_id"))]
block_id: Option<Cow<'a, str>>,
/// One of the available [element objects 🔗][element_objects].
///
/// [element_objects]: https://api.slack.com/reference/messaging/block-elements
#[serde(skip_serializing_if = "Option::is_none")]
accessory: Option<BlockElement<'a>>,
}
impl<'a> Section<'a> {
/// Build a new section block
///
/// For example, see `blocks::section::build::SectionBuilder`.
pub fn builder() -> build::SectionBuilderInit<'a> {
build::SectionBuilderInit::new()
}
/// Validate that this Section block agrees with Slack's model requirements
///
/// # Errors
/// - If `fields` contains more than 10 fields
/// - If one of `fields` longer than 2000 chars
/// - If `text` longer than 3000 chars
/// - If `block_id` longer than 255 chars
///
/// # Example
/// ```
/// use slack_blocks::{blocks, compose::text};
///
/// let long_string = std::iter::repeat(' ').take(256).collect::<String>();
///
/// let block = blocks::Section::builder().text(text::Plain::from("file_id"))
/// .block_id(long_string)
/// .build();
///
/// assert_eq!(true, matches!(block.validate(), Err(_)));
/// ``` |
Validate::validate(self)
}
}
/// Section block builder
pub mod build {
use std::marker::PhantomData;
use super::*;
use crate::build::*;
/// Compile-time markers for builder methods
#[allow(non_camel_case_types)]
pub mod method {
/// SectionBuilder.text
#[derive(Clone, Copy, Debug)]
pub struct text;
}
/// Initial state for `SectionBuilder`
pub type SectionBuilderInit<'a> =
SectionBuilder<'a, RequiredMethodNotCalled<method::text>>;
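// Type-state builder: the extra type parameter records whether the
// required `text`/`field(s)` setter has been called. `build()` is only
// implemented for `SectionBuilder<'a, Set<method::text>>`, turning a
// missing required field into a compile error instead of a runtime one.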
/// Build a Section block
///
/// Allows you to construct safely, with compile-time checks
/// on required setter methods.
///
/// # Required Methods
/// `SectionBuilder::build()` is only available if these methods have been called:
/// - `text` **or** `field(s)`, both may be called.
///
/// # Example
/// ```
/// use slack_blocks::{blocks::Section,
/// elems::Image,
/// text,
/// text::ToSlackPlaintext};
///
/// let block =
/// Section::builder().text("foo".plaintext())
/// .field("bar".plaintext())
/// .field("baz".plaintext())
/// // alternatively:
/// .fields(vec!["bar".plaintext(),
/// "baz".plaintext()]
/// .into_iter()
/// .map(text::Text::from)
/// )
/// .accessory(Image::builder().image_url("foo.png")
/// .alt_text("pic of foo")
/// .build())
/// .build();
/// ```
#[derive(Debug)]
pub struct SectionBuilder<'a, Text> {
accessory: Option<BlockElement<'a>>,
text: Option<text::Text>,
fields: Option<Vec<text::Text>>,
block_id: Option<Cow<'a, str>>,
state: PhantomData<Text>,
}
impl<'a, E> SectionBuilder<'a, E> {
/// Create a new SectionBuilder
pub fn new() -> Self {
Self { accessory: None,
text: None,
fields: None,
block_id: None,
state: PhantomData::<_> }
}
/// Set `accessory` (Optional)
pub fn accessory<B>(mut self, acc: B) -> Self
where B: Into<BlockElement<'a>>
{
self.accessory = Some(acc.into());
self
}
/// Add `text` (**Required: this or `field(s)`**)
///
/// The text for the block, in the form of a [text object 🔗].
///
/// Maximum length for the text in this field is 3000 characters.
///
/// [text object 🔗]: https://api.slack.com/reference/messaging/composition-objects#text
pub fn text<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
SectionBuilder { accessory: self.accessory,
text: Some(text.into()),
fields: self.fields,
block_id: self.block_id,
state: PhantomData::<_> }
}
/// Set `fields` (**Required: this or `text`**)
///
/// A collection of [text objects 🔗].
///
/// Any text objects included with fields will be
/// rendered in a compact format that allows for
/// 2 columns of side-by-side text.
///
/// Maximum number of items is 10.
///
/// Maximum length for the text in each item is 2000 characters.
///
/// [text objects 🔗]: https://api.slack.com/reference/messaging/composition-objects#text
pub fn fields<I>(self, fields: I) -> SectionBuilder<'a, Set<method::text>>
where I: IntoIterator<Item = text::Text>
{
SectionBuilder { accessory: self.accessory,
text: self.text,
fields: Some(fields.into_iter().collect()),
block_id: self.block_id,
state: PhantomData::<_> }
}
/// Append a single field to `fields`.
pub fn field<T>(mut self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
let mut fields = self.fields.take().unwrap_or_default();
fields.push(text.into());
self.fields(fields)
}
/// XML macro children, appends `fields` to the Section.
///
/// To set `text`, use the `text` attribute.
/// ```
/// use slack_blocks::{blocks::Section, blox::*, text, text::ToSlackPlaintext};
///
/// let xml = blox! {
/// <section_block text={"Section".plaintext()}>
/// <text kind=plain>"Foo"</text>
/// <text kind=plain>"Bar"</text>
/// </section_block>
/// };
///
/// let equiv = Section::builder().text("Section".plaintext())
/// .field("Foo".plaintext())
/// .field("Bar".plaintext())
/// .build();
///
/// assert_eq!(xml, equiv);
/// ```
#[cfg(feature = "blox")]
#[cfg_attr(docsrs, doc(cfg(feature = "blox")))]
pub fn child<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
self.field(text)
}
/// Set `block_id` (Optional)
///
/// A string acting as a unique identifier for a block.
///
/// You can use this `block_id` when you receive an interaction payload
/// to [identify the source of the action 🔗].
///
/// If not specified, a `block_id` will be generated.
///
/// Maximum length for this field is 255 |
#[cfg(feature = "validation")]
#[cfg_attr(docsrs, doc(cfg(feature = "validation")))]
pub fn validate(&self) -> ValidationResult {
| random_line_split |
section.rs |
fn builder() -> build::SectionBuilderInit<'a> {
build::SectionBuilderInit::new()
}
/// Validate that this Section block agrees with Slack's model requirements
///
/// # Errors
/// - If `fields` contains more than 10 fields
/// - If one of `fields` longer than 2000 chars
/// - If `text` longer than 3000 chars
/// - If `block_id` longer than 255 chars
///
/// # Example
/// ```
/// use slack_blocks::{blocks, compose::text};
///
/// let long_string = std::iter::repeat(' ').take(256).collect::<String>();
///
/// let block = blocks::Section::builder().text(text::Plain::from("file_id"))
/// .block_id(long_string)
/// .build();
///
/// assert_eq!(true, matches!(block.validate(), Err(_)));
/// ```
#[cfg(feature = "validation")]
#[cfg_attr(docsrs, doc(cfg(feature = "validation")))]
pub fn validate(&self) -> ValidationResult {
Validate::validate(self)
}
}
/// Section block builder
pub mod build {
use std::marker::PhantomData;
use super::*;
use crate::build::*;
/// Compile-time markers for builder methods
#[allow(non_camel_case_types)]
pub mod method {
/// SectionBuilder.text
#[derive(Clone, Copy, Debug)]
pub struct text;
}
/// Initial state for `SectionBuilder`
pub type SectionBuilderInit<'a> =
SectionBuilder<'a, RequiredMethodNotCalled<method::text>>;
/// Build a Section block
///
/// Allows you to construct safely, with compile-time checks
/// on required setter methods.
///
/// # Required Methods
/// `SectionBuilder::build()` is only available if these methods have been called:
/// - `text` **or** `field(s)`, both may be called.
///
/// # Example
/// ```
/// use slack_blocks::{blocks::Section,
/// elems::Image,
/// text,
/// text::ToSlackPlaintext};
///
/// let block =
/// Section::builder().text("foo".plaintext())
/// .field("bar".plaintext())
/// .field("baz".plaintext())
/// // alternatively:
/// .fields(vec!["bar".plaintext(),
/// "baz".plaintext()]
/// .into_iter()
/// .map(text::Text::from)
/// )
/// .accessory(Image::builder().image_url("foo.png")
/// .alt_text("pic of foo")
/// .build())
/// .build();
/// ```
#[derive(Debug)]
pub struct SectionBuilder<'a, Text> {
accessory: Option<BlockElement<'a>>,
text: Option<text::Text>,
fields: Option<Vec<text::Text>>,
block_id: Option<Cow<'a, str>>,
state: PhantomData<Text>,
}
impl<'a, E> SectionBuilder<'a, E> {
/// Create a new SectionBuilder
pub fn new() -> Self {
Self { accessory: None,
text: None,
fields: None,
block_id: None,
state: PhantomData::<_> }
}
/// Set `accessory` (Optional)
pub fn accessory<B>(mut self, acc: B) -> Self
where B: Into<BlockElement<'a>>
{
self.accessory = Some(acc.into());
self
}
/// Add `text` (**Required: this or `field(s)`**)
///
/// The text for the block, in the form of a [text object 🔗].
///
/// Maximum length for the text in this field is 3000 characters.
///
/// [text object 🔗]: https://api.slack.com/reference/messaging/composition-objects#text
pub fn text<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
SectionBuilder { accessory: self.accessory,
text: Some(text.into()),
fields: self.fields,
block_id: self.block_id,
state: PhantomData::<_> }
}
/// Set `fields` (**Required: this or `text`**)
///
/// A collection of [text objects 🔗].
///
/// Any text objects included with fields will be
/// rendered in a compact format that allows for
/// 2 columns of side-by-side text.
///
/// Maximum number of items is 10.
///
/// Maximum length for the text in each item is 2000 characters.
///
/// [text objects 🔗]: https://api.slack.com/reference/messaging/composition-objects#text
pub fn fields<I>(self, fields: I) -> SectionBuilder<'a, Set<method::text>>
where I: IntoIterator<Item = text::Text>
{
SectionBuilder { accessory: self.accessory,
text: self.text,
fields: Some(fields.into_iter().collect()),
block_id: self.block_id,
state: PhantomData::<_> }
}
/// Append a single field to `fields`.
pub fn field<T>(mut self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
let mut fields = self.fields.take().unwrap_or_default();
fields.push(text.into());
self.fields(fields)
}
/// XML macro children, appends `fields` to the Section.
///
/// To set `text`, use the `text` attribute.
/// ```
/// use slack_blocks::{blocks::Section, blox::*, text, text::ToSlackPlaintext};
///
/// let xml = blox! {
/// <section_block text={"Section".plaintext()}>
/// <text kind=plain>"Foo"</text>
/// <text kind=plain>"Bar"</text>
/// </section_block>
/// };
///
/// let equiv = Section::builder().text("Section".plaintext())
/// .field("Foo".plaintext())
/// .field("Bar".plaintext())
/// .build();
///
/// assert_eq!(xml, equiv);
/// ```
#[cfg(feature = "blox")]
#[cfg_attr(docsrs, doc(cfg(feature = "blox")))]
pub fn child<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
self.field(text)
}
/// Set `block_id` (Optional)
///
/// A string acting as a unique identifier for a block.
///
/// You can use this `block_id` when you receive an interaction payload
/// to [identify the source of the action 🔗].
///
/// If not specified, a `block_id` will be generated.
///
/// Maximum length for this field is 255 characters.
///
/// [identify the source of the action 🔗]: https://api.slack.com/interactivity/handling#payloads
pub fn block_id<S>(mut self, block_id: S) -> Self
where S: Into<Cow<'a, str>>
{
self.block_id = Some(block_id.into());
self
}
}
impl<'a> SectionBuilder<'a, Set<method::text>> {
/// All done building, now give me a darn actions block!
///
/// > `no method name 'build' found for struct 'SectionBuilder<...>'`?
/// Make sure all required setter methods have been called. See docs for `SectionBuilder`.
///
/// ```compile_fail
/// use slack_blocks::blocks::Section;
///
/// let foo = Section::builder().build(); // Won't compile!
/// ```
///
/// ```
/// use slack_blocks::{blocks::Section,
/// compose::text::ToSlackPlaintext,
/// elems::Image};
///
/// let block =
/// Section::builder().text("foo".plaintext())
/// .accessory(Image::builder().image_url("foo.png")
/// .alt_text("pic of foo")
/// .build())
/// .build();
/// ```
pub fn build(self) -> Section<'a> {
Section { text: self.text,
fields: self.fields.map(|fs| fs.into()),
accessory: self.accessory,
block_id: self.block_id }
}
}
}
#[cfg(feature = "validation")]
mod validate {
use super::*;
use crate::{compose::text,
val_helpr::{below_len, ValidatorResult}};
pub(super) fn text(text: &text::Text) -> ValidatorResult {
below_len("Section.text", 3000, text.as_ref())
}
pub(super) fn block_id(text: &Cow<str>) -> ValidatorResult {
below_len("Section.block_id", 255, text.as_ref())
}
pub(super) fn fields(texts: &Cow<[text::Text]>) -> ValidatorResult {
below_len("Section.fie | lds", | identifier_name |
|
section.rs | crate::val_helpr::ValidationResult;
use crate::{compose::text, elems::BlockElement};
/// # Section Block
///
/// _[slack api docs 🔗]_
///
/// Available in surfaces:
/// - [modals 🔗]
/// - [messages 🔗]
/// - [home tabs 🔗]
///
/// A `section` is one of the most flexible blocks available -
/// it can be used as a simple text block,
/// in combination with text fields,
/// or side-by-side with any of the available [block elements 🔗]
///
/// [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section
/// [modals 🔗]: https://api.slack.com/surfaces/modals
/// [messages 🔗]: https://api.slack.com/surfaces/messages
/// [home tabs 🔗]: https://api.slack.com/surfaces/tabs
/// [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements
#[derive(Clone, Debug, Deserialize, Hash, PartialEq, Serialize)]
#[cfg_attr(feature = "validation", derive(Validate))]
pub struct Section<'a> {
#[serde(skip_serializing_if = "Option::is_none")]
#[cfg_attr(feature = "validation", validate(custom = "validate::fields"))]
fields: Option<Cow<'a, [text::Text]>>,
#[serde(skip_serializing_if = "Option::is_none")]
#[cfg_attr(feature = "validation", validate(custom = "validate::text"))]
text: Option<text::Text>,
#[serde(skip_serializing_if = "Option::is_none")]
#[cfg_attr(feature = "validation", validate(custom = "validate::block_id"))]
block_id: Option<Cow<'a, str>>,
/// One of the available [element objects 🔗][element_objects].
///
/// [element_objects]: https://api.slack.com/reference/messaging/block-elements
#[serde(skip_serializing_if = "Option::is_none")]
accessory: Option<BlockElement<'a>>,
}
impl<'a> Section<'a> {
/// Build a new section block
///
/// For example, see `blocks::section::build::SectionBuilder`.
pub fn builder() -> build::SectionBuilderInit<'a> {
build::SectionBuilderInit::new()
}
/// Validate that this Section block agrees with Slack's model requirements
///
/// # Errors
/// - If `fields` contains more than 10 fields
/// - If one of `fields` longer than 2000 chars
/// - If `text` longer than 3000 chars
/// - If `block_id` longer than 255 chars
///
/// # Example
/// ```
/// use slack_blocks::{blocks, compose::text};
///
/// let long_string = std::iter::repeat(' ').take(256).collect::<String>();
///
/// let block = blocks::Section::builder().text(text::Plain::from("file_id"))
/// .block_id(long_string)
/// .build();
///
/// assert_eq!(true, matches!(block.validate(), Err(_)));
/// ```
#[cfg(feature = "validation")]
#[cfg_attr(docsrs, doc(cfg(feature = "validation")))]
pub fn validate(&self) -> ValidationResult {
Validate::validate(self)
}
}
/// Section block builder
pub mod build {
use std::marker::PhantomData;
use super::*;
use crate::build::*;
/// Compile-time markers for builder methods
#[allow(non_camel_case_types)]
pub mod method {
/// SectionBuilder.text
#[derive(Clone, Copy, Debug)]
pub struct text;
}
/// Initial state for `SectionBuilder`
pub type SectionBuilderInit<'a> =
SectionBuilder<'a, RequiredMethodNotCalled<method::text>>;
/// Build a Section block
///
/// Allows you to construct safely, with compile-time checks
/// on required setter methods.
///
/// # Required Methods
/// `SectionBuilder::build()` is only available if these methods have been called:
/// - `text` **or** `field(s)`, both may be called.
///
/// # Example
/// ```
/// use slack_blocks::{blocks::Section,
/// elems::Image,
/// text,
/// text::ToSlackPlaintext};
///
/// let block =
/// Section::builder().text("foo".plaintext())
/// .field("bar".plaintext())
/// .field("baz".plaintext())
/// // alternatively:
/// .fields(vec!["bar".plaintext(),
/// "baz".plaintext()]
/// .into_iter()
/// .map(text::Text::from)
/// )
/// .accessory(Image::builder().image_url("foo.png")
/// .alt_text("pic of foo")
/// .build())
/// .build();
/// ```
#[derive(Debug)]
pub struct SectionBuilder<'a, Text> {
accessory: Option<BlockElement<'a>>,
text: Option<text::Text>,
fields: Option<Vec<text::Text>>,
block_id: Option<Cow<'a, str>>,
state: PhantomData<Text>,
}
impl<'a, E> SectionBuilder<'a, E> {
/// Create a new SectionBuilder
pub fn new() -> Self {
Self { accessory: None,
text: None,
fields: None,
block_id: None,
state: PhantomData::<_> }
}
/// Set `accessory` (Optional)
pub fn accessory<B>(mut self, acc: B) -> Self
where B: Into<BlockElement<'a>>
{
self.accessory = Some(acc.into());
self
}
/// Add `text` (**Required: this or `field(s)`**)
///
/// The text for the block, in the form of a [text object 🔗].
///
/// Maximum length for the text in this field is 3000 characters.
///
/// [text object 🔗]: https://api.slack.com/reference/messaging/composition-objects#text
pub fn text<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
SectionBuilder { accessory: self.accessory,
text: Some(text.into()),
fields: self.fields,
block_id: self.block_id,
state: PhantomData::<_> }
}
/// Set `fields` (**Required: this or `text`**)
///
/// A collection of [text objects 🔗].
///
/// Any text objects included with fields will be
/// rendered in a compact format that allows for
/// 2 columns of side-by-side text.
///
/// Maximum number of items is 10.
///
/// Maximum length for the text in each item is 2000 characters.
///
/// [text objects 🔗]: https://api.slack.com/reference/messaging/composition-objects#text
pub fn fields<I>(self, fields: I) -> SectionBuilder<'a, Set<method::text>>
where I: IntoIterator<Item = text::Text>
{
SectionBuilder { accessory: self.accessory,
text: self.text,
fields: Some(fields.into_iter().collect()),
block_id: self.block_id,
state: PhantomData::<_> }
}
/// Append a single field to `fields`.
pub fn field<T>(mut self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
let mut fields = self.fields.take().unwrap_or_default();
fields.push(text.into());
self.fields(fields)
}
/// XML macro children, appends `fields` to the Section.
///
/// To set `text`, use the `text` attribute.
/// ```
/// use slack_blocks::{blocks::Section, blox::*, text, text::ToSlackPlaintext};
///
/// let xml = blox! {
/// <section_block text={"Section".plaintext()}>
/// <text kind=plain>"Foo"</text>
/// <text kind=plain>"Bar"</text>
/// </section_block>
/// };
///
/// let equiv = Section::builder().text("Section".plaintext())
/// .field("Foo".plaintext())
/// .field("Bar".plaintext())
/// .build();
///
/// assert_eq!(xml, equiv);
/// ```
#[cfg(feature = "blox")]
#[cfg_attr(docsrs, doc(cfg(feature = "blox")))]
pub fn child<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>>
where T: Into<text::Text>
{
self.field(text)
}
/// Set `block_id` (Optional)
///
| ique identifier for a block.
///
/// You can use this `block_id` when you receive an interaction payload
/// to [identify the source of the action 🔗].
///
/// If not specified, a `block_id` will be generated.
///
/// Maximum length for this field is 25 |
/// A string acting as a un | identifier_body |
Release.py |
#definitionsperson
"""
# Map BibTeX to CFF fields
name_fields = {
"last": "family-names",
"bibtex_first": "given-names",
"prelast": "name-particle",
"lineage": "name-suffix",
}
result = {
cff_field: " ".join(person.get_part(bibtex_field))
for bibtex_field, cff_field in name_fields.items()
if person.get_part(bibtex_field)
}
# Use CFF "entity" format if BibTex has no first & last names
if list(result.keys()) == ["family-names"]:
return {"name": result["family-names"]}
return result
def to_cff_reference(bib_entry: pybtex.database.Entry) -> dict:
"""BibTeX to CFF conversion for references.
The format is defined here:
https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsreference
"""
def _cff_transform(cff_field, bib_value):
| "sep": 9,
"oct": 10,
"nov": 11,
"dec": 12,
}[bib_value[:3].lower()]
return bib_value
cff_reference = {
"type": _cff_transform(cff_field="type", bib_value=bib_entry.type),
"authors": [
to_cff_person(person) for person in bib_entry.persons["author"]
],
}
# Map BibTeX to CFF fields. This is just a subset of the most relevant
# fields.
fields = {
"doi": "doi",
"edition": "edition",
"isbn": "isbn",
"license": "license",
"month": "month",
"number": "number",
"pages": "pages",
"publisher": "publisher",
"title": "title",
"url": "url",
"version": "version",
"volume": "volume",
"year": "year",
"booktitle": "collection-title",
}
for bibtex_field, value in bib_entry.fields.items():
bibtex_field = bibtex_field.lower()
if bibtex_field in fields:
cff_field = fields[bibtex_field]
cff_reference[cff_field] = _cff_transform(
cff_field=cff_field, bib_value=value
)
return cff_reference
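# Hypothetical usage sketch (entry names and fields are illustrative):
#
#   bib = pybtex.database.parse_string(
#       "@article{x, author = {Doe, Jane}, title = {T}, year = {2020}}",
#       "bibtex")
#   to_cff_reference(bib.entries["x"])
#   # -> {'type': 'article',
#   #     'authors': [{'family-names': 'Doe', 'given-names': 'Jane'}],
#   #     'title': 'T', 'year': '2020'}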
def collect_citation_metadata(
metadata: dict, references: List[pybtex.database.Entry]
) -> dict:
"""Produces the data stored in the CITATION.cff file
Args:
metadata: The project metadata read from the YAML file. This is the main
source of information for this function.
references: List of references resolved from 'metadata["References"]'.
They will be converted to CFF.
Returns:
Citation data in the [Citation File Format](https://github.com/citation-file-format/citation-file-format)
"""
# Author list
citation_authors = []
for author_tier in ["Core", "Developers", "Contributors"]:
for author in metadata["Authors"][author_tier]["List"]:
family_names, given_names = author["Name"].split(", ")
citation_author = {
"family-names": family_names,
"given-names": given_names,
}
if "Orcid" in author:
citation_author["orcid"] = (
"https://orcid.org/" + author["Orcid"]
)
if "Affiliations" in author and len(author["Affiliations"]) > 0:
citation_author["affiliation"] = " and ".join(
author["Affiliations"]
)
citation_authors.append(citation_author)
# References in CITATION.cff format
citation_references = [to_cff_reference(entry) for entry in references]
return {
"cff-version": "1.2.0",
"message": (
"Please cite SpECTRE in any publications that make use of its code"
" or data. Cite the latest version that you use in your"
" publication. The citation for this version is listed below."
),
"title": metadata["Name"],
"url": metadata["Homepage"],
"repository-code": "https://github.com/" + metadata["GitHub"],
"version": metadata["Version"],
"date-released": metadata["PublicationDate"],
"doi": metadata["Doi"],
"authors": citation_authors,
"keywords": metadata["Keywords"],
"license": metadata["License"],
"references": citation_references,
}
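# Dumped with yaml.safe_dump, the returned dict becomes the CITATION.cff
# file; schematically (values are illustrative):
#
#   cff-version: 1.2.0
#   title: SpECTRE
#   version: 2021.12.06
#   doi: 10.5281/zenodo.xxxxxxx
#   authors:
#   - family-names: ...
#     given-names: ...
#   references:
#   - type: article
#     ...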
def build_bibtex_entry(metadata: dict):
"""Builds a BibTeX entry that we suggest people cite in publications
Args:
metadata: The project metadata read from the YAML file
Returns:
A pybtex.database.Entry. Use the `to_string` member function to convert to
a string in BibTeX, YAML or other formats.
"""
# We truncate the author list in the BibTeX entry after 'Developers',
# because not all journals are happy with printing an excessively long
# author list, e.g. Phys. Rev. wants at most 15 authors. By truncating the
# author list here, the user who copies the BibTeX entry doesn't have to
# make the decision where to truncate.
authors = [
pybtex.database.Person(author["Name"])
for author in (
metadata["Authors"]["Core"]["List"]
+ metadata["Authors"]["Developers"]["List"]
)
] + [pybtex.database.Person("others")]
entry = pybtex.database.Entry(
"software",
persons=dict(author=authors),
fields=dict(
title=(
r"\texttt{"
+ metadata["Name"]
+ " v"
+ metadata["Version"]
+ "}"
),
# The 'version' field is not used by revtex4-2, so we also put the
# version in the title
version=metadata["Version"],
publisher="Zenodo",
doi=metadata["Doi"],
url=metadata["Homepage"],
howpublished=(
r"\href{https://doi.org/"
+ metadata["Doi"]
+ "}{"
+ metadata["Doi"]
+ "}"
),
license=metadata["License"],
year=str(metadata["PublicationDate"].year),
month=str(metadata["PublicationDate"].month),
),
)
entry.key = "spectrecode"
return entry
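# Rendered via the `to_string` member function mentioned in the docstring
# (e.g. entry.to_string("bibtex")), this produces roughly:
#
#   @software{spectrecode,
#     author = "...",
#     title = "{\texttt{SpECTRE v...}}",
#     version = "...",
#     publisher = "Zenodo",
#     doi = "...",
#     ...
#   }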
def prepare(
metadata: dict,
version_name: str,
metadata_file: str,
citation_file: str,
bib_file: str,
references_file: str,
readme_file: str,
zenodo: Zenodo,
github: Github,
update_only: bool,
check_only: bool,
):
# Validate new version name
match_version_name = re.match(VERSION_PATTERN + "$", version_name)
if not match_version_name:
raise ValueError(
f"Version name '{version_name}' doesn't match "
f"pattern '{VERSION_PATTERN}'."
)
publication_date = datetime.date(
year=int(match_version_name.group(1)),
month=int(match_version_name.group(2)),
day=int(match_version_name.group(3)),
)
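# Version names are calendar dates (YYYY.MM.DD), so the regex groups map
# directly onto the Zenodo publication date.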
if update_only:
# Don't try to create a new version draft on Zenodo but update the
# existing one. We assume that the metadata in the repository already
# point to the existing version draft on Zenodo that we want to update.
# This is the case when the user has run this script without the
# `--update-only` option before and has thus created the new version
# draft on Zenodo, and is now running it again with the `--update-only`
# option to push updated metadata to the draft.
new_version_id = metadata["ZenodoId"]
else:
# Zenodo doesn't have a draft for the new version yet, or the metadata
# in the repository is not yet updated. Either way, we use the ID from
# the metadata to obtain the latest version on Zenodo and create a new
# draft. Zenodo doesn't create another draft if one already exists, but
# just returns it.
latest_version_id = metadata["ZenodoId"]
try:
latest_version_id_on_zenodo = zenodo.get_latest_version_id(
record_id=latest_version_id
)
except requests.exceptions.HTTPError as err:
raise requests.exceptions.HTTPError(
f"No published record with ID {latest_version_id} found on "
"Zenodo. Use the '--update-only' flag if you're re-running "
"the script over a repository that already has an unpublished "
"new version ID inserted into Metadata.yaml | if cff_field == "type":
if bib_value == "inproceedings":
return "article"
elif bib_value == "incollection":
return "article"
elif cff_field == "publisher":
return {"name": bib_value}
elif cff_field == "month":
try:
return int(bib_value)
except ValueError:
return {
"jan": 1,
"feb": 2,
"mar": 3,
"apr": 4,
"may": 5,
"jun": 6,
"jul": 7,
"aug": 8, | identifier_body |
Release.py | \[{}\]\(.*\)".format(DOI_PATTERN),
r"DOI: [{}]({})".format(doi, doi_url),
content,
flags=re.MULTILINE,
)
assert (
num_subs > 0
), "Could not find DOI (matching '{}') with link in file '{}'.".format(
DOI_PATTERN, readme_file
)
return content
def replace_link_in_readme(content, link_text, link_url):
content, num_subs = re.subn(
r"\[{}\]\(.*\)".format(link_text),
r"[{}]({})".format(link_text, link_url),
content,
flags=re.MULTILINE,
)
assert num_subs > 0, (
f"Could not find link with text '{link_text}' in "
f"file '{readme_file}'."
)
return content
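# Each helper above uses re.subn and asserts num_subs > 0, so a README that
# drifted from the expected badge/link layout fails the release loudly
# instead of being silently left stale.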
def replace_bibtex_entry_in_readme(content, bibtex_entry):
bibtex_entry_string = bib_file_content
# Work around an issue with escaping LaTeX commands
bibtex_entry_string = bibtex_entry_string.replace("\\", "\\\\")
FENCE_PATTERN = "<!-- BIBTEX ENTRY -->"
content, num_subs = re.subn(
(FENCE_PATTERN + "(.*)" + FENCE_PATTERN),
(
FENCE_PATTERN
+ "\n```bib\n"
+ bibtex_entry_string.strip()
+ "\n```\n"
+ FENCE_PATTERN
),
content,
flags=re.DOTALL,
)
assert (
num_subs > 0
), f"Could not find a BibTeX entry in file '{readme_file}'."
return content
with open(readme_file, "r" if check_only else "r+") as open_readme_file:
content_original = open_readme_file.read()
content = replace_badge_in_readme(
content_original,
"release",
f"https://img.shields.io/badge/release-v{version_name}-informational",
"https://github.com/{}/releases/tag/v{}".format(
metadata["GitHub"], version_name
),
)
content = replace_badge_in_readme(
content,
"DOI",
new_version_draft["links"]["badge"],
new_version_draft["links"]["doi"],
)
content = replace_doi_in_readme(
content, new_version_doi, new_version_draft["links"]["doi"]
)
# We don't currently link to the Zenodo BibTeX entry because it isn't
# very good. Instead, we generate our own.
content = replace_bibtex_entry_in_readme(content, bibtex_entry)
content_diff = "\n".join(
difflib.context_diff(
content_original.split("\n"),
content.split("\n"),
lineterm="",
fromfile=readme_file,
tofile=readme_file,
)
)
if check_only:
report_check_only(f"Would apply diff:\n{content_diff}")
else:
logger.debug(f"Applying diff:\n{content_diff}")
open_readme_file.seek(0)
open_readme_file.write(content)
open_readme_file.truncate()
# Upload the updated metadata to Zenodo
zenodo_metadata = collect_zenodo_metadata(metadata, references, github)
logger.debug(
"The metadata we'll send to Zenodo are:\n{}".format(
yaml.safe_dump(zenodo_metadata, allow_unicode=True)
)
)
if check_only:
report_check_only("Would upload metadata to Zenodo.")
else:
zenodo.update_deposition(id=new_version_id, metadata=zenodo_metadata)
logger.debug(
(
"New Zenodo version draft is now prepared. You can edit "
"it here:\n{}"
).format(new_version_draft["links"]["html"])
)
def publish(
metadata: dict,
zenodo: Zenodo,
github: Github,
auto_publish: bool,
check_only: bool,
):
version_name = metadata["Version"]
new_version_id = metadata["ZenodoId"]
# Retrieve the Zenodo deposition for the version draft that we have
# prepared before
new_version_draft = zenodo.get_deposition(id=new_version_id)
# Retrieve the file "bucket" ID for uploading data
bucket_id = pathlib.PurePosixPath(
urllib.parse.urlparse(new_version_draft["links"]["bucket"]).path
).parts[-1]
# Retrieve the URL of the GitHub release archive that we want to upload
# to Zenodo
gh_user, gh_repo = metadata["GitHub"].split("/")
# Alternatively we could use the release ID that GitHub's
# 'actions/create-release' returns to retrieve the release
gh_release = github.get_release_by_tag(
user=gh_user, repo=gh_repo, tag="v" + version_name
)
logger.debug(
"The release on GitHub is:\n{}".format(
yaml.safe_dump(gh_release, allow_unicode=True)
)
)
zipball_url = gh_release["zipball_url"]
# Stream the release archive to Zenodo.
# We keep the file name for the archive on Zenodo the same for each
# release so we can just overwrite it. Note that the _unpacked_ directory
# name contains the version as expected, since the unpacked directory name
# is determined by the GitHub release.
archive_filename = gh_repo + ".zip"
if check_only:
report_check_only(
f"Would stream release zipball '{zipball_url}' as "
f"filename '{archive_filename}' to bucket '{bucket_id}'."
)
else:
# Download the zipball from GitHub, then upload to Zenodo.
# Note: Something like this should also work to stream the file
# directly from GitHub to Zenodo without temporarily saving it, but
# Zenodo doesn't currently document their new "bucket" file API so it
# is difficult to debug:
# with requests.get(zipball_url, stream=True) as zipball_stream:
# zipball_stream.raise_for_status()
# uploaded_file = zenodo.upload_file(bucket_id=bucket_id,
# file=zipball_stream,
# filename=archive_filename)
zipball_download = requests.get(zipball_url, stream=True)
with tempfile.TemporaryFile() as open_tmp_file:
for chunk in zipball_download.iter_content():
open_tmp_file.write(chunk)
open_tmp_file.seek(0)
uploaded_file = zenodo.upload_file(
bucket_id=bucket_id,
file=open_tmp_file,
filename=archive_filename,
)
logger.debug(
"Release archive upload complete:\n{}".format(
yaml.safe_dump(uploaded_file, allow_unicode=True)
)
)
# Publish!
if auto_publish:
if check_only:
report_check_only(
f"Would publish Zenodo record {new_version_id} now!"
)
else:
published_record = zenodo.publish(id=new_version_id)
logger.debug(
"Zenodo record published:\n{}".format(
yaml.safe_dump(published_record, allow_unicode=True)
)
)
logger.info(
(
"Zenodo record is now public! Here's the link to the "
"record:\n{}"
).format(published_record["links"]["record_html"])
)
else:
logger.info(
(
"Release is ready to be published on Zenodo. Go to this "
"website, make sure everything looks fine and then hit the "
"'Publish' button:\n{}"
).format(new_version_draft["links"]["html"])
)
if __name__ == "__main__":
# Always work with the repository that contains this file
repo = git.Repo(__file__, search_parent_directories=True)
import argparse
parser = argparse.ArgumentParser(
description=(
"Prepare the repository and publish releases on Zenodo as part of"
" the automatic versioning procedure. This script is not intended"
" to be run outside of GitHub actions. The 'prepare' subprogram"
" reserves a DOI on Zenodo and inserts it into the repository"
" along with the new version name. Once the release archive has"
" been created, the 'publish'subprogram uploads it to Zenodo."
f" Repository: {repo.working_dir}."
)
)
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"--zenodo-token",
required=True,
help=(
"Zenodo access token. Refer to the Zenodo documentation "
"for instructions on creating a personal access token."
),
)
parent_parser.add_argument(
"--zenodo-sandbox",
action="store_true",
help="Use the Zenodo sandbox instead of the public version of Zenodo",
)
parent_parser.add_argument(
"--github-token",
required=False,
help=(
"Access token for GitHub queries. Refer to the GitHub documentation"
" for instructions on creating a personal access token."
),
)
parent_parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Verbosity (-v, -vv, ...)",
)
parent_parser.add_argument(
"--check-only",
action="store_true", | help=(
"Dry mode, only check that all files are consistent. Nothing is"
" edited or uploaded to Zenodo. Used in CI tests to make sure" | random_line_split |
|
Release.py | #definitionsperson
"""
# Map BibTeX to CFF fields
name_fields = {
"last": "family-names",
"bibtex_first": "given-names",
"prelast": "name-particle",
"lineage": "name-suffix",
}
result = {
cff_field: " ".join(person.get_part(bibtex_field))
for bibtex_field, cff_field in name_fields.items()
if person.get_part(bibtex_field)
}
# Use CFF "entity" format if BibTex has no first & last names
if list(result.keys()) == ["family-names"]:
return {"name": result["family-names"]}
return result
def to_cff_reference(bib_entry: pybtex.database.Entry) -> dict:
"""BibTeX to CFF conversion for references.
The format is defined here:
https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsreference
"""
def _cff_transform(cff_field, bib_value):
if cff_field == "type":
if bib_value == "inproceedings":
return "article"
elif bib_value == "incollection":
return "article"
elif cff_field == "publisher":
return {"name": bib_value}
elif cff_field == "month":
try:
return int(bib_value)
except ValueError:
return {
"jan": 1,
"feb": 2,
"mar": 3,
"apr": 4,
"may": 5,
"jun": 6,
"jul": 7,
"aug": 8,
"sep": 9,
"oct": 10,
"nov": 11,
"dec": 12,
}[bib_value[:3].lower()]
return bib_value
cff_reference = {
"type": _cff_transform(cff_field="type", bib_value=bib_entry.type),
"authors": [
to_cff_person(person) for person in bib_entry.persons["author"]
],
}
# Map BibTeX to CFF fields. This is just a subset of the most relevant
# fields.
fields = {
"doi": "doi",
"edition": "edition",
"isbn": "isbn",
"license": "license",
"month": "month",
"number": "number",
"pages": "pages",
"publisher": "publisher",
"title": "title",
"url": "url",
"version": "version",
"volume": "volume",
"year": "year",
"booktitle": "collection-title",
}
for bibtex_field, value in bib_entry.fields.items():
bibtex_field = bibtex_field.lower()
if bibtex_field in fields:
cff_field = fields[bibtex_field]
cff_reference[cff_field] = _cff_transform(
cff_field=cff_field, bib_value=value
)
return cff_reference
def collect_citation_metadata(
metadata: dict, references: List[pybtex.database.Entry]
) -> dict:
"""Produces the data stored in the CITATION.cff file
Args:
metadata: The project metadata read from the YAML file. This is the main
source of information for this function.
references: List of references resolved from 'metadata["References"]'.
They will be converted to CFF.
Returns:
Citation data in the [Citation File Format](https://github.com/citation-file-format/citation-file-format)
"""
# Author list
citation_authors = []
for author_tier in ["Core", "Developers", "Contributors"]:
for author in metadata["Authors"][author_tier]["List"]:
family_names, given_names = author["Name"].split(", ")
citation_author = {
"family-names": family_names,
"given-names": given_names,
}
if "Orcid" in author:
citation_author["orcid"] = (
"https://orcid.org/" + author["Orcid"]
)
if "Affiliations" in author and len(author["Affiliations"]) > 0:
citation_author["affiliation"] = " and ".join(
author["Affiliations"]
)
citation_authors.append(citation_author)
# References in CITATION.cff format
citation_references = [to_cff_reference(entry) for entry in references]
return {
"cff-version": "1.2.0",
"message": (
"Please cite SpECTRE in any publications that make use of its code"
" or data. Cite the latest version that you use in your"
" publication. The citation for this version is listed below."
),
"title": metadata["Name"],
"url": metadata["Homepage"],
"repository-code": "https://github.com/" + metadata["GitHub"],
"version": metadata["Version"],
"date-released": metadata["PublicationDate"],
"doi": metadata["Doi"],
"authors": citation_authors,
"keywords": metadata["Keywords"],
"license": metadata["License"],
"references": citation_references,
}
def build_bibtex_entry(metadata: dict):
"""Builds a BibTeX entry that we suggest people cite in publications
Args:
metadata: The project metadata read from the YAML file
Returns:
A pybtex.database.Entry. Use the `to_string` member function to convert to
a string in BibTeX, YAML or other formats.
"""
# We truncate the author list in the BibTeX entry after 'Developers',
# because not all journals are happy with printing an excessively long
# author list, e.g. Phys. Rev. wants at most 15 authors. By truncating the
# author list here, the user who copies the BibTeX entry doesn't have to
# make the decision where to truncate.
authors = [
pybtex.database.Person(author["Name"])
for author in (
metadata["Authors"]["Core"]["List"]
+ metadata["Authors"]["Developers"]["List"]
)
] + [pybtex.database.Person("others")]
entry = pybtex.database.Entry(
"software",
persons=dict(author=authors),
fields=dict(
title=(
r"\texttt{"
+ metadata["Name"]
+ " v"
+ metadata["Version"]
+ "}"
),
# The 'version' field is not used by revtex4-2, so we also put the
# version in the title
version=metadata["Version"],
publisher="Zenodo",
doi=metadata["Doi"],
url=metadata["Homepage"],
howpublished=(
r"\href{https://doi.org/"
+ metadata["Doi"]
+ "}{"
+ metadata["Doi"]
+ "}"
),
license=metadata["License"],
year=str(metadata["PublicationDate"].year),
month=str(metadata["PublicationDate"].month),
),
)
entry.key = "spectrecode"
return entry
def | (
metadata: dict,
version_name: str,
metadata_file: str,
citation_file: str,
bib_file: str,
references_file: str,
readme_file: str,
zenodo: Zenodo,
github: Github,
update_only: bool,
check_only: bool,
):
# Validate new version name
match_version_name = re.match(VERSION_PATTERN + "$", version_name)
if not match_version_name:
raise ValueError(
f"Version name '{version_name}' doesn't match "
f"pattern '{VERSION_PATTERN}'."
)
publication_date = datetime.date(
year=int(match_version_name.group(1)),
month=int(match_version_name.group(2)),
day=int(match_version_name.group(3)),
)
if update_only:
# Don't try to create a new version draft on Zenodo but update the
# existing one. We assume that the metadata in the repository already
# point to the existing version draft on Zenodo that we want to update.
# This is the case when the user has run this script without the
# `--update-only` option before and has thus created the new version
# draft on Zenodo, and is now running it again with the `--update-only`
# option to push updated metadata to the draft.
new_version_id = metadata["ZenodoId"]
else:
# Zenodo doesn't have a draft for the new version yet, or the metadata
# in the repository is not yet updated. Either way, we use the ID from
# the metadata to obtain the latest version on Zenodo and create a new
# draft. Zenodo doesn't create another draft if one already exists, but
# just returns it.
latest_version_id = metadata["ZenodoId"]
try:
latest_version_id_on_zenodo = zenodo.get_latest_version_id(
record_id=latest_version_id
)
except requests.exceptions.HTTPError as err:
raise requests.exceptions.HTTPError(
f"No published record with ID {latest_version_id} found on "
"Zenodo. Use the '--update-only' flag if you're re-running "
"the script over a repository that already has an unpublished "
"new version ID inserted into Metadata | prepare | identifier_name |
Release.py | #definitionsperson
"""
# Map BibTeX to CFF fields
name_fields = {
"last": "family-names",
"bibtex_first": "given-names",
"prelast": "name-particle",
"lineage": "name-suffix",
}
result = {
cff_field: " ".join(person.get_part(bibtex_field))
for bibtex_field, cff_field in name_fields.items()
if person.get_part(bibtex_field)
}
# Use CFF "entity" format if BibTex has no first & last names
if list(result.keys()) == ["family-names"]:
return {"name": result["family-names"]}
return result
def to_cff_reference(bib_entry: pybtex.database.Entry) -> dict:
"""BibTeX to CFF conversion for references.
The format is defined here:
https://github.com/citation-file-format/citation-file-format/blob/main/schema-guide.md#definitionsreference
"""
def _cff_transform(cff_field, bib_value):
if cff_field == "type":
if bib_value == "inproceedings":
return "article"
elif bib_value == "incollection":
return "article"
elif cff_field == "publisher":
return {"name": bib_value}
elif cff_field == "month":
try:
return int(bib_value)
except ValueError:
return {
"jan": 1,
"feb": 2,
"mar": 3,
"apr": 4,
"may": 5,
"jun": 6,
"jul": 7,
"aug": 8,
"sep": 9,
"oct": 10,
"nov": 11,
"dec": 12,
}[bib_value[:3].lower()]
return bib_value
cff_reference = {
"type": _cff_transform(cff_field="type", bib_value=bib_entry.type),
"authors": [
to_cff_person(person) for person in bib_entry.persons["author"]
],
}
# Map BibTeX to CFF fields. This is just a subset of the most relevant
# fields.
fields = {
"doi": "doi",
"edition": "edition",
"isbn": "isbn",
"license": "license",
"month": "month",
"number": "number",
"pages": "pages",
"publisher": "publisher",
"title": "title",
"url": "url",
"version": "version",
"volume": "volume",
"year": "year",
"booktitle": "collection-title",
}
for bibtex_field, value in bib_entry.fields.items():
bibtex_field = bibtex_field.lower()
if bibtex_field in fields:
cff_field = fields[bibtex_field]
cff_reference[cff_field] = _cff_transform(
cff_field=cff_field, bib_value=value
)
return cff_reference
def collect_citation_metadata(
metadata: dict, references: List[pybtex.database.Entry]
) -> dict:
"""Produces the data stored in the CITATION.cff file
Args:
metadata: The project metadata read from the YAML file. This is the main
source of information for this function.
references: List of references resolved from 'metadata["References"]'.
They will be converted to CFF.
Returns:
Citation data in the [Citation File Format](https://github.com/citation-file-format/citation-file-format)
"""
# Author list
citation_authors = []
for author_tier in ["Core", "Developers", "Contributors"]:
for author in metadata["Authors"][author_tier]["List"]:
family_names, given_names = author["Name"].split(", ")
citation_author = {
"family-names": family_names,
"given-names": given_names,
}
if "Orcid" in author:
|
if "Affiliations" in author and len(author["Affiliations"]) > 0:
citation_author["affiliation"] = " and ".join(
author["Affiliations"]
)
citation_authors.append(citation_author)
# References in CITATION.cff format
citation_references = [to_cff_reference(entry) for entry in references]
return {
"cff-version": "1.2.0",
"message": (
"Please cite SpECTRE in any publications that make use of its code"
" or data. Cite the latest version that you use in your"
" publication. The citation for this version is listed below."
),
"title": metadata["Name"],
"url": metadata["Homepage"],
"repository-code": "https://github.com/" + metadata["GitHub"],
"version": metadata["Version"],
"date-released": metadata["PublicationDate"],
"doi": metadata["Doi"],
"authors": citation_authors,
"keywords": metadata["Keywords"],
"license": metadata["License"],
"references": citation_references,
}
def build_bibtex_entry(metadata: dict):
"""Builds a BibTeX entry that we suggest people cite in publications
Args:
metadata: The project metadata read from the YAML file
Returns:
A pybtex.database.Entry. Use the `to_string` member function to convert to
a string in BibTeX, YAML or other formats.
"""
# We truncate the author list in the BibTeX entry after 'Developers',
# because not all journals are happy with printing an excessively long
# author list, e.g. Phys. Rev. wants at most 15 authors. By truncating the
# author list here, the user who copies the BibTeX entry doesn't have to
# make the decision where to truncate.
authors = [
pybtex.database.Person(author["Name"])
for author in (
metadata["Authors"]["Core"]["List"]
+ metadata["Authors"]["Developers"]["List"]
)
] + [pybtex.database.Person("others")]
entry = pybtex.database.Entry(
"software",
persons=dict(author=authors),
fields=dict(
title=(
r"\texttt{"
+ metadata["Name"]
+ " v"
+ metadata["Version"]
+ "}"
),
# The 'version' field is not used by revtex4-2, so we also put the
# version in the title
version=metadata["Version"],
publisher="Zenodo",
doi=metadata["Doi"],
url=metadata["Homepage"],
howpublished=(
r"\href{https://doi.org/"
+ metadata["Doi"]
+ "}{"
+ metadata["Doi"]
+ "}"
),
license=metadata["License"],
year=str(metadata["PublicationDate"].year),
month=str(metadata["PublicationDate"].month),
),
)
entry.key = "spectrecode"
return entry
def prepare(
metadata: dict,
version_name: str,
metadata_file: str,
citation_file: str,
bib_file: str,
references_file: str,
readme_file: str,
zenodo: Zenodo,
github: Github,
update_only: bool,
check_only: bool,
):
# Validate new version name
match_version_name = re.match(VERSION_PATTERN + "$", version_name)
if not match_version_name:
raise ValueError(
f"Version name '{version_name}' doesn't match "
f"pattern '{VERSION_PATTERN}'."
)
publication_date = datetime.date(
year=int(match_version_name.group(1)),
month=int(match_version_name.group(2)),
day=int(match_version_name.group(3)),
)
if update_only:
# Don't try to create a new version draft on Zenodo but update the
# existing one. We assume that the metadata in the repository already
# point to the existing version draft on Zenodo that we want to update.
# This is the case when the user has run this script without the
# `--update-only` option before and has thus created the new version
# draft on Zenodo, and is now running it again with the `--update-only`
# option to push updated metadata to the draft.
new_version_id = metadata["ZenodoId"]
else:
# Zenodo doesn't have a draft for the new version yet, or the metadata
# in the repository is not yet updated. Either way, we use the ID from
# the metadata to obtain the latest version on Zenodo and create a new
# draft. Zenodo doesn't create another draft if one already exists, but
# just returns it.
latest_version_id = metadata["ZenodoId"]
try:
latest_version_id_on_zenodo = zenodo.get_latest_version_id(
record_id=latest_version_id
)
except requests.exceptions.HTTPError as err:
raise requests.exceptions.HTTPError(
f"No published record with ID {latest_version_id} found on "
"Zenodo. Use the '--update-only' flag if you're re-running "
"the script over a repository that already has an unpublished "
"new version ID inserted into Metadata | citation_author["orcid"] = (
"https://orcid.org/" + author["Orcid"]
)
| conditional_block |
match.go |
(1) < 1 {
// match.slotxxxResult = goodHands[rand.Intn(len(goodHands))]
// }
//
match.playerResult.SlotxxxResult = match.slotxxxResult
match.playerResult.MapPaylineIndexToWonMoney, match.playerResult.MapPaylineIndexToIsWin, match.playerResult.MatchWonType = CalcWonMoneys(
match.slotxxxResult, match.payLineIndexs, match.moneyPerLine)
sumMoneyAfterSpin := CalcSumPay(match.playerResult.MapPaylineIndexToWonMoney)
match.mutex.Unlock()
match.updateMatchStatus()
time.Sleep(DURATION_PHASE_1_SPIN)
// add % to jackpot
var jackpotObj *jackpot.Jackpot
if match.moneyPerLine == MONEYS_PER_LINE[1] {
jackpotObj = match.game.jackpot100
} else if match.moneyPerLine == MONEYS_PER_LINE[2] {
jackpotObj = match.game.jackpot1000
} else if match.moneyPerLine == MONEYS_PER_LINE[3] {
jackpotObj = match.game.jackpot10000
} else {
// no jackpot is attached to this bet level; jackpotObj stays nil and is skipped below
}
if jackpotObj != nil {
temp := match.moneyPerLine * int64(len(match.payLineIndexs))
temp = int64(0.025 * float64(temp)) // add 2.5% of the total bet to the jackpot (per the author, 95% is repaid to users overall)
jackpotObj.AddMoney(temp)
if match.playerResult.MatchWonType == MATCH_WON_TYPE_JACKPOT {
amount := int64(float64(jackpotObj.Value()) * 0.5)
match.winningMoneyIfStop = amount
jackpotObj.AddMoney(-amount)
jackpotObj.NotifySomeoneHitJackpot(
match.GameCode(),
amount,
match.player.Id(),
match.player.Name(),
)
} else if sumMoneyAfterSpin > 0 {
match.winningMoneyIfStop = sumMoneyAfterSpin
match.currentXxxMoney = sumMoneyAfterSpin
match.requiredMoneyToGoOn = 0
// double-or-nothing (x2) loop; i is the level counter
i := 0
match.phase = PHASE_3_CHOOSE_GO_ON
for i < MAX_XXX_LEVEL {
if match.player.GetAvailableMoney(match.CurrencyType()) < match.currentXxxMoney {
break
}
match.currentXxxLevel = i
match.updateMatchStatus()
timer := time.After(DURATION_PHASE_3_CHOOSE_GO_ON)
var phase3choice string
select {
case <-timer:
phase3choice = ACTION_STOP_PLAYING
case phase3choice = <-match.ChanPhase3:
// the player's choice arrives on ChanPhase3 and is stored in phase3choice
}
if phase3choice == ACTION_STOP_PLAYING {
break
} else {
match.phase3result = Random1Card(phase3choice, i)
match.isRightPhase3 = false
var cardRank string
if len(match.phase3result) > 0 {
cardRank = string(match.phase3result[0])
}
if cardRank == "A" || cardRank == "2" || cardRank == "3" ||
cardRank == "4" || cardRank == "5" || cardRank == "6" {
if phase3choice == ACTION_SELECT_SMALL {
match.isRightPhase3 = true
}
} else if cardRank == "7" {
} else {
if phase3choice == ACTION_SELECT_BIG {
match.isRightPhase3 = true
}
}
isFirstTry := !match.is1stTryFailed[i]
if match.isRightPhase3 {
match.requiredMoneyToGoOn = 0
match.currentXxxMoney = 2 * match.currentXxxMoney
match.winningMoneyIfStop = match.currentXxxMoney
i += 1
} else { // wrong guess: deduct money
match.is1stTryFailed[i] = true
match.winningMoneyIfStop = match.currentXxxMoney
if i == 0 && isFirstTry {
// the first-try discount below is disabled, so both branches currently charge the full amount
// match.requiredMoneyToGoOn = int64(float64(match.currentXxxMoney) * (2.0 / 3.0))
match.requiredMoneyToGoOn = match.currentXxxMoney
} else {
match.requiredMoneyToGoOn = match.currentXxxMoney
}
if match.requiredMoneyToGoOn > 0 {
match.player.ChangeMoneyAndLog(
-match.requiredMoneyToGoOn, match.CurrencyType(), false, "",
phase3choice, match.GameCode(), match.matchId)
//
match.playerResult.SumLostMoney -= match.requiredMoneyToGoOn
// add 1/52 of the forfeited amount to the jackpot
jackpotObj.AddMoney(match.requiredMoneyToGoOn / 52)
}
}
}
} // end loop x2 game
match.currentXxxLevel = i
match.updateMatchStatus()
time.Sleep(200 * time.Millisecond)
if i == MAX_XXX_LEVEL {
amount := int64(float64(jackpotObj.Value()) * 0.05)
match.winningMoneyIfStop += amount
jackpotObj.AddMoney(-amount)
jackpotObj.NotifySomeoneHitJackpot(
match.GameCode(),
amount,
match.player.Id(),
match.player.Name(),
)
}
}
}
// _________________________________________________________________________
// end the match
// _________________________________________________________________________
action := Action{
actionName: ACTION_FINISH_SESSION,
chanResponse: make(chan *ActionResponse),
}
match.ChanActionReceiver <- &action
<-action.chanResponse
match.phase = PHASE_4_RESULT
match.playerResult.SumWonMoney = match.winningMoneyIfStop
match.updateMatchStatus()
if match.playerResult.SumWonMoney > 0 {
match.player.ChangeMoneyAndLog(
match.playerResult.SumWonMoney, match.CurrencyType(), false, "",
ACTION_FINISH_SESSION, match.game.GameCode(), match.matchId)
}
if match.playerResult.SumWonMoney >= zmisc.GLOBAL_TEXT_LOWER_BOUND {
zmisc.InsertNewGlobalText(map[string]interface{}{
"type": zmisc.GLOBAL_TEXT_TYPE_BIG_WIN,
"username": match.player.DisplayName(),
"wonMoney": match.playerResult.SumWonMoney,
"gamecode": match.GameCode(),
})
}
// update the player's history of the 10 most recent matches
match.game.mutex.Lock()
if _, isIn := match.game.mapPlayerIdToHistory[match.player.Id()]; !isIn {
temp := cardgame.NewSizedList(10)
match.game.mapPlayerIdToHistory[match.player.Id()] = &temp
}
match.game.mapPlayerIdToHistory[match.player.Id()].Append(
match.playerResult.String())
match.game.mutex.Unlock()
// update the big-win list
if match.playerResult.SumWonMoney >= 10*match.moneyPerLine {
match.game.mutex.Lock()
match.game.bigWinList.Append(match.playerResult.String())
match.game.mutex.Unlock()
}
// LogMatchRecord2
var humanWon, humanLost, botWon, botLost int64
humanWon = match.playerResult.SumWonMoney
humanLost = -match.playerResult.SumLostMoney
if humanWon > humanLost {
rank.ChangeKey(rank.RANK_NUMBER_OF_WINS, match.playerResult.Id, 1)
}
playerIpAdds := map[int64]string{}
playerObj := match.player
playerIpAdds[playerObj.Id()] = playerObj.IpAddress()
playerResults := make([]map[string]interface{}, 0)
r1p := match.playerResult
playerResults = append(playerResults, r1p.ToMap())
record.LogMatchRecord2(
match.game.GameCode(), match.game.CurrencyType(), match.moneyPerLine, 0,
humanWon, humanLost, botWon, botLost,
match.matchId, playerIpAdds,
playerResults)
}
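// Sketch (not part of the original file): the phase-3 wait inside Start
// combines time.After with ChanPhase3 so an idle player is auto-stopped.
// Factored into a standalone helper, the pattern looks like this:
func awaitPhase3Choice(choices <-chan string, timeout time.Duration) string {
	timer := time.After(timeout)
	select {
	case <-timer:
		// no action before the deadline: treat it as a stop
		return ACTION_STOP_PLAYING
	case choice := <-choices:
		// the player chose small, big, or stop in time
		return choice
	}
}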
//
func (match *SlotxxxMatch) GameCode() string {
return match.game.GameCode()
}
func (match *SlotxxxMatch) CurrencyType() string {
return match.game.CurrencyType()
}
// JSON object representing general match info
func (match *SlotxxxMatch) SerializedData() map[string]interface{} {
data := match.playerResult.Serialize()
data["phase"] = match.phase
data["currentXxxLevel"] = match.currentXxxLevel
data["currentXxxMoney"] = match.currentXxxMoney
data["is1stTryFailed"] = match.is1stTryFailed
data["phase3result"] = match.phase3result
data["requiredMoneyToGoOn"] = match.requiredMoneyToGoOn
return data
}
func (match *SlotxxxMatch) updateMatchStatus() {
data := match.SerializedData()
match.game.SendDataToPlayerId(
"SlotxxxUpdateMatchStatus",
data,
match.player.Id(),
)
}
func InMatchLoopReceiveActions(match *SlotxxxMatch) {
for {
action := <-match.ChanActionReceiver
if action.actionName == ACTION_FINISH_SESSION {
action.chanResponse <- &ActionResponse{err: nil} | break
} else {
go func(match *SlotxxxMatch, action *Action) { | random_line_split |
|
match.go | "sumWonMoney": result1p.SumWonMoney,
"sumLostMoney": result1p.SumLostMoney,
"mapPaylineIndexToWonMoney": result1p.MapPaylineIndexToWonMoney,
"mapPaylineIndexToIsWin": result1p.MapPaylineIndexToIsWin,
"matchWonType": result1p.MatchWonType,
"changedMoney": result1p.ChangedMoney,
}
return result
}
// for table match_record
func (result1p *ResultOnePlayer) ToMap() map[string]interface{} {
result := map[string]interface{}{
"id": result1p.Id,
"username": result1p.Username,
"change": result1p.ChangedMoney,
}
return result
}
func (result1p *ResultOnePlayer) String() string {
bytes, _ := json.Marshal(result1p.Serialize())
return string(bytes)
}
type SlotxxxMatch struct {
game *SlotxxxGame
player *player.Player
startedTime time.Time
matchId string
tax int64
moneyPerLine int64
payLineIndexs []int
slotxxxResult [][]string
// from 0 to 6
currentXxxLevel int
// money staked at the current level;
// if the player guesses phase 3 correctly and then stops, they receive twice this amount
currentXxxMoney int64
requiredMoneyToGoOn int64
winningMoneyIfStop int64
// whether the small/big guess was correct
isRightPhase3 bool
// tracks, per level, whether the first small/big guess was wrong;
// guessing correctly on the first try at all 7 levels earns the jackpot bonus
is1stTryFailed []bool // true if the first guess at this level was wrong
playerResult *ResultOnePlayer
phase string
// 1 card, A <= card.rank <= 6 is small, 8 <= card.rank <= K is big
phase3result string
// receives the player's choice: stop, small, or big
ChanPhase3 chan string
ChanActionReceiver chan *Action
mutex sync.RWMutex
}
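// Sketch (assumed helper, not in the original): the small/big rule noted on
// phase3result — A..6 counts as small, 7 beats both choices, everything else
// counts as big — expressed as a pure function over the leading rank character:
func classifyCardRank(rank string) string {
	switch rank {
	case "A", "2", "3", "4", "5", "6":
		return "small"
	case "7":
		return "neither" // a 7 loses for both choices
	default:
		return "big"
	}
}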
type Action struct {
actionName string
playerId int64
data map[string]interface{}
chanResponse chan *ActionResponse
}
type ActionResponse struct {
err error
data map[string]interface{}
}
func NewSlotxxxMatch(
slotxxxG *SlotxxxGame, createdPlayer *player.Player, matchCounter int64,
moneyPerLine int64, payLineIndexs []int,
) *SlotxxxMatch {
match := &SlotxxxMatch{
game: slotxxxG,
player: createdPlayer,
startedTime: time.Now(),
matchId: fmt.Sprintf("%v_%v_%v", slotxxxG.GameCode(), matchCounter, time.Now().Unix()),
playerResult: &ResultOnePlayer{},
moneyPerLine: moneyPerLine,
payLineIndexs: payLineIndexs,
is1stTryFailed: make([]bool, MAX_XXX_LEVEL),
phase: "PHASE_0_INITING",
ChanPhase3: make(chan string),
ChanActionReceiver: make(chan *Action),
}
// initialize match state here
match.playerResult.Id = match.player.Id()
match.playerResult.Username = match.player.Name()
match.playerResult.MatchId = match.matchId
match.playerResult.StartedTime = match.startedTime
match.playerResult.MoneyPerLine = match.moneyPerLine
match.playerResult.SumLostMoney = -match.moneyPerLine
//
go Start(match)
go InMatchLoopReceiveActions(match)
return match
}
// match main flow
func Start(match *SlotxxxMatch) {
defer func() {
if r := recover(); r != nil {
bytes := debug.Stack()
fmt.Println("ERROR ERROR ERROR: ", r, string(bytes))
}
}()
defer func() {
match.game.mutex.Lock()
delete(match.game.mapPlayerIdToMatch, match.player.Id())
match.game.mutex.Unlock()
}()
// _________________________________________________________________________
// _________________________________________________________________________
match.mutex.Lock()
match.phase = PHASE_1_SPIN
match.slotxxxResult = RandomSpin()
// test hit jackpot
// goodHands := [][][]string{
// [][]string{[]string{"Ac"}, []string{"As"}, []string{"Ah"}},
// [][]string{[]string{"Ad"}, []string{"Ac"}, []string{"As"}},
// [][]string{[]string{"Ah"}, []string{"Ad"}, []string{"Ac"}},
// [][]string{[]string{"8d"}, []string{"7d"}, []string{"9d"}},
// [][]string{[]string{"4h"}, []string{"6h"}, []string{"5h"}},
// [][]string{[]string{"3s"}, []string{"As"}, []string{"2s"}},
// [][]string{[]string{"8s"}, []string{"8c"}, []string{"8d"}},
// [][]string{[]string{"6s"}, []string{"6c"}, []string{"6d"}},
// [][]string{[]string{"9s"}, []string{"9c"}, []string{"9d"}},
// [][]string{[]string{"Ad"}, []string{"4c"}, []string{"5d"}},
// [][]string{[]string{"As"}, []string{"Ad"}, []string{"8c"}},
// [][]string{[]string{"7h"}, []string{"2c"}, []string{"Ad"}},
// }
// if rand.Intn(1) < 1 {
// match.slotxxxResult = goodHands[rand.Intn(len(goodHands))]
// }
//
match.playerResult.SlotxxxResult = match.slotxxxResult
match.playerResult.MapPaylineIndexToWonMoney, match.playerResult.MapPaylineIndexToIsWin, match.playerResult.MatchWonType = CalcWonMoneys(
match.slotxxxResult, match.payLineIndexs, match.moneyPerLine)
sumMoneyAfterSpin := CalcSumPay(match.playerResult.MapPaylineIndexToWonMoney)
match.mutex.Unlock()
match.updateMatchStatus()
time.Sleep(DURATION_PHASE_1_SPIN)
// add % to jackpot
var jackpotObj *jackpot.Jackpot
if match.moneyPerLine == MONEYS_PER_LINE[1] {
jackpotObj = match.game.jackpot100
} else if match.moneyPerLine == MONEYS_PER_LINE[2] {
jackpotObj = match.game.jackpot1000
} el | E[3] {
jackpotObj = match.game.jackpot10000
} else {
// no jackpot is attached to this bet level; jackpotObj stays nil and is skipped below
}
if jackpotObj != nil {
temp := match.moneyPerLine * int64(len(match.payLineIndexs))
temp = int64(0.025 * float64(temp)) // add 2.5% of the total bet to the jackpot (per the author, 95% is repaid to users overall)
jackpotObj.AddMoney(temp)
if match.playerResult.MatchWonType == MATCH_WON_TYPE_JACKPOT {
amount := int64(float64(jackpotObj.Value()) * 0.5)
match.winningMoneyIfStop = amount
jackpotObj.AddMoney(-amount)
jackpotObj.NotifySomeoneHitJackpot(
match.GameCode(),
amount,
match.player.Id(),
match.player.Name(),
)
} else if sumMoneyAfterSpin > 0 {
match.winningMoneyIfStop = sumMoneyAfterSpin
match.currentXxxMoney = sumMoneyAfterSpin
match.requiredMoneyToGoOn = 0
// double-or-nothing (x2) loop; i is the level counter
i := 0
match.phase = PHASE_3_CHOOSE_GO_ON
for i < MAX_XXX_LEVEL {
if match.player.GetAvailableMoney(match.CurrencyType()) < match.currentXxxMoney {
break
}
match.currentXxxLevel = i
match.updateMatchStatus()
timer := time.After(DURATION_PHASE_3_CHOOSE_GO_ON)
var phase3choice string
select {
case <-timer:
phase3choice = ACTION_STOP_PLAYING
case phase3choice = <-match.ChanPhase3:
// the player's choice arrives on ChanPhase3 and is stored in phase3choice
}
if phase3choice == ACTION_STOP_PLAYING {
break
} else {
match.phase3result = Random1Card(phase3choice, i)
match.isRightPhase3 = false
var cardRank string
if len(match.phase3result) > 0 {
cardRank = string(match.phase3result[0])
}
if cardRank == "A" || cardRank == "2" || cardRank == "3" ||
cardRank == "4" || cardRank == "5" || cardRank == "6" {
if phase3choice == ACTION_SELECT_SMALL {
match.isRightPhase3 = true
}
} else if cardRank == "7" {
} else {
if phase3choice == ACTION_SELECT_BIG {
match.isRightPhase3 = true
}
}
isFirstTry := !match.is1stTryFailed[i]
| se if match.moneyPerLine == MONEYS_PER_LIN | conditional_block |
match.go | "}, []string{"Ac"}},
// [][]string{[]string{"8d"}, []string{"7d"}, []string{"9d"}},
// [][]string{[]string{"4h"}, []string{"6h"}, []string{"5h"}},
// [][]string{[]string{"3s"}, []string{"As"}, []string{"2s"}},
// [][]string{[]string{"8s"}, []string{"8c"}, []string{"8d"}},
// [][]string{[]string{"6s"}, []string{"6c"}, []string{"6d"}},
// [][]string{[]string{"9s"}, []string{"9c"}, []string{"9d"}},
// [][]string{[]string{"Ad"}, []string{"4c"}, []string{"5d"}},
// [][]string{[]string{"As"}, []string{"Ad"}, []string{"8c"}},
// [][]string{[]string{"7h"}, []string{"2c"}, []string{"Ad"}},
// }
// if rand.Intn(1) < 1 {
// match.slotxxxResult = goodHands[rand.Intn(len(goodHands))]
// }
//
match.playerResult.SlotxxxResult = match.slotxxxResult
match.playerResult.MapPaylineIndexToWonMoney, match.playerResult.MapPaylineIndexToIsWin, match.playerResult.MatchWonType = CalcWonMoneys(
match.slotxxxResult, match.payLineIndexs, match.moneyPerLine)
sumMoneyAfterSpin := CalcSumPay(match.playerResult.MapPaylineIndexToWonMoney)
match.mutex.Unlock()
match.updateMatchStatus()
time.Sleep(DURATION_PHASE_1_SPIN)
// add % to jackpot
var jackpotObj *jackpot.Jackpot
if match.moneyPerLine == MONEYS_PER_LINE[1] {
jackpotObj = match.game.jackpot100
} else if match.moneyPerLine == MONEYS_PER_LINE[2] {
jackpotObj = match.game.jackpot1000
} else if match.moneyPerLine == MONEYS_PER_LINE[3] {
jackpotObj = match.game.jackpot10000
} else {
// no jackpot is attached to this bet level; jackpotObj stays nil and is skipped below
}
if jackpotObj != nil {
temp := match.moneyPerLine * int64(len(match.payLineIndexs))
temp = int64(0.025 * float64(temp)) // add 2.5% of the total bet to the jackpot (per the author, 95% is repaid to users overall)
jackpotObj.AddMoney(temp)
if match.playerResult.MatchWonType == MATCH_WON_TYPE_JACKPOT {
amount := int64(float64(jackpotObj.Value()) * 0.5)
match.winningMoneyIfStop = amount
jackpotObj.AddMoney(-amount)
jackpotObj.NotifySomeoneHitJackpot(
match.GameCode(),
amount,
match.player.Id(),
match.player.Name(),
)
} else if sumMoneyAfterSpin > 0 {
match.winningMoneyIfStop = sumMoneyAfterSpin
match.currentXxxMoney = sumMoneyAfterSpin
match.requiredMoneyToGoOn = 0
// double-or-nothing (x2) loop; i is the level counter
i := 0
match.phase = PHASE_3_CHOOSE_GO_ON
for i < MAX_XXX_LEVEL {
if match.player.GetAvailableMoney(match.CurrencyType()) < match.currentXxxMoney {
break
}
match.currentXxxLevel = i
match.updateMatchStatus()
timer := time.After(DURATION_PHASE_3_CHOOSE_GO_ON)
var phase3choice string
select {
case <-timer:
phase3choice = ACTION_STOP_PLAYING
case phase3choice = <-match.ChanPhase3:
// the player's choice arrives on ChanPhase3 and is stored in phase3choice
}
if phase3choice == ACTION_STOP_PLAYING {
break
} else {
match.phase3result = Random1Card(phase3choice, i)
match.isRightPhase3 = false
var cardRank string
if len(match.phase3result) > 0 {
cardRank = string(match.phase3result[0])
}
if cardRank == "A" || cardRank == "2" || cardRank == "3" ||
cardRank == "4" || cardRank == "5" || cardRank == "6" {
if phase3choice == ACTION_SELECT_SMALL {
match.isRightPhase3 = true
}
} else if cardRank == "7" {
} else {
if phase3choice == ACTION_SELECT_BIG {
match.isRightPhase3 = true
}
}
isFirstTry := !match.is1stTryFailed[i]
if match.isRightPhase3 {
match.requiredMoneyToGoOn = 0
match.currentXxxMoney = 2 * match.currentXxxMoney
match.winningMoneyIfStop = match.currentXxxMoney
i += 1
} else { // wrong guess: deduct money
match.is1stTryFailed[i] = true
match.winningMoneyIfStop = match.currentXxxMoney
if i == 0 && isFirstTry {
// the first-try discount below is disabled, so both branches currently charge the full amount
// match.requiredMoneyToGoOn = int64(float64(match.currentXxxMoney) * (2.0 / 3.0))
match.requiredMoneyToGoOn = match.currentXxxMoney
} else {
match.requiredMoneyToGoOn = match.currentXxxMoney
}
if match.requiredMoneyToGoOn > 0 {
match.player.ChangeMoneyAndLog(
-match.requiredMoneyToGoOn, match.CurrencyType(), false, "",
phase3choice, match.GameCode(), match.matchId)
//
match.playerResult.SumLostMoney -= match.requiredMoneyToGoOn
// add 1/52 of the forfeited amount to the jackpot
jackpotObj.AddMoney(match.requiredMoneyToGoOn / 52)
}
}
}
} // end loop x2 game
match.currentXxxLevel = i
match.updateMatchStatus()
time.Sleep(200 * time.Millisecond)
if i == MAX_XXX_LEVEL {
amount := int64(float64(jackpotObj.Value()) * 0.05)
match.winningMoneyIfStop += amount
jackpotObj.AddMoney(-amount)
jackpotObj.NotifySomeoneHitJackpot(
match.GameCode(),
amount,
match.player.Id(),
match.player.Name(),
)
}
}
}
// _________________________________________________________________________
// end the match
// _________________________________________________________________________
action := Action{
actionName: ACTION_FINISH_SESSION,
chanResponse: make(chan *ActionResponse),
}
match.ChanActionReceiver <- &action
<-action.chanResponse
match.phase = PHASE_4_RESULT
match.playerResult.SumWonMoney = match.winningMoneyIfStop
match.updateMatchStatus()
if match.playerResult.SumWonMoney > 0 {
match.player.ChangeMoneyAndLog(
match.playerResult.SumWonMoney, match.CurrencyType(), false, "",
ACTION_FINISH_SESSION, match.game.GameCode(), match.matchId)
}
if match.playerResult.SumWonMoney >= zmisc.GLOBAL_TEXT_LOWER_BOUND {
zmisc.InsertNewGlobalText(map[string]interface{}{
"type": zmisc.GLOBAL_TEXT_TYPE_BIG_WIN,
"username": match.player.DisplayName(),
"wonMoney": match.playerResult.SumWonMoney,
"gamecode": match.GameCode(),
})
}
// update the player's history of the 10 most recent matches
match.game.mutex.Lock()
if _, isIn := match.game.mapPlayerIdToHistory[match.player.Id()]; !isIn {
temp := cardgame.NewSizedList(10)
match.game.mapPlayerIdToHistory[match.player.Id()] = &temp
}
match.game.mapPlayerIdToHistory[match.player.Id()].Append(
match.playerResult.String())
match.game.mutex.Unlock()
// update the big-win list
if match.playerResult.SumWonMoney >= 10*match.moneyPerLine {
match.game.mutex.Lock()
match.game.bigWinList.Append(match.playerResult.String())
match.game.mutex.Unlock()
}
// LogMatchRecord2
var humanWon, humanLost, botWon, botLost int64
humanWon = match.playerResult.SumWonMoney
humanLost = -match.playerResult.SumLostMoney
if humanWon > humanLost {
rank.ChangeKey(rank.RANK_NUMBER_OF_WINS, match.playerResult.Id, 1)
}
playerIpAdds := map[int64]string{}
playerObj := match.player
playerIpAdds[playerObj.Id()] = playerObj.IpAddress()
playerResults := make([]map[string]interface{}, 0)
r1p := match.playerResult
playerResults = append(playerResults, r1p.ToMap())
record.LogMatchRecord2(
match.game.GameCode(), match.game.CurrencyType(), match.moneyPerLine, 0,
humanWon, humanLost, botWon, botLost,
match.matchId, playerIpAdds,
playerResults)
}
//
func (match *SlotxxxMatch) GameCode() string {
return match.game.GameCode()
}
func (match *SlotxxxMatch) CurrencyType | () string {
return match.game.Cu | identifier_body |
|
match.go | "sumWonMoney": result1p.SumWonMoney,
"sumLostMoney": result1p.SumLostMoney,
"mapPaylineIndexToWonMoney": result1p.MapPaylineIndexToWonMoney,
"mapPaylineIndexToIsWin": result1p.MapPaylineIndexToIsWin,
"matchWonType": result1p.MatchWonType,
"changedMoney": result1p.ChangedMoney,
}
return result
}
// for table match_record
func (result1p *ResultOnePlayer) ToMap() map[string]interface{} {
result := map[string]interface{}{
"id": result1p.Id,
"username": result1p.Username,
"change": result1p.ChangedMoney,
}
return result
}
func (result1p *ResultOnePlayer) String() string {
bytes, _ := json.Marshal(result1p.Serialize())
return string(bytes)
}
type SlotxxxMatch struct {
game *SlotxxxGame
player *player.Player
startedTime time.Time
matchId string
tax int64
moneyPerLine int64
payLineIndexs []int
slotxxxResult [][]string
// from 0 to 6
currentXxxLevel int
// money staked at the current level;
// if the player guesses phase 3 correctly and then stops, they receive twice this amount
currentXxxMoney int64
requiredMoneyToGoOn int64
winningMoneyIfStop int64
// whether the small/big guess was correct
isRightPhase3 bool
// tracks, per level, whether the first small/big guess was wrong;
// guessing correctly on the first try at all 7 levels earns the jackpot bonus
is1stTryFailed []bool // true if the first guess at this level was wrong
playerResult *ResultOnePlayer
phase string
// 1 card, A <= card.rank <= 6 is small, 8 <= card.rank <= K is big
phase3result string
// receives the player's choice: stop, small, or big
ChanPhase3 chan string
ChanActionReceiver chan *Action
mutex sync.RWMutex
}
type Action struct {
actionName string
playerId int64
data map[string]interface{}
chanResponse chan *ActionResponse
}
type ActionResponse struct {
err error
data map[string]interface{}
}
func NewSlotxxxMatch(
slotxxxG *SlotxxxGame, createdPlayer *player.Player, matchCounter int64,
moneyPerLine int64, payLineIndexs []int,
) *SlotxxxMatch {
match := &SlotxxxMatch{
game: slotxxxG,
player: createdPlayer,
startedTime: time.Now(),
matchId: fmt.Sprintf("%v_%v_%v", slotxxxG.GameCode(), matchCounter, time.Now().Unix()),
playerResult: &ResultOnePlayer{},
moneyPerLine: moneyPerLine,
payLineIndexs: payLineIndexs,
is1stTryFailed: make([]bool, MAX_XXX_LEVEL),
phase: "PHASE_0_INITING",
ChanPhase3: make(chan string),
ChanActionReceiver: make(chan *Action),
}
// initialize match state here
match.playerResult.Id = match.player.Id()
match.playerResult.Username = match.player.Name()
match.playerResult.MatchId = match.matchId
match.playerResult.StartedTime = match.startedTime
match.playerResult.MoneyPerLine = match.moneyPerLine
match.playerResult.SumLostMoney = -match.moneyPerLine
//
go Start(match)
go InMatchLoopReceiveActions(match)
return match
}
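// Sketch (assumed caller code, not in the original): since NewSlotxxxMatch
// starts InMatchLoopReceiveActions, other goroutines talk to a running match
// only through its action channel, e.g.:
func sendAction(m *SlotxxxMatch, name string, playerId int64) error {
	a := &Action{
		actionName:   name,
		playerId:     playerId,
		chanResponse: make(chan *ActionResponse),
	}
	m.ChanActionReceiver <- a
	resp := <-a.chanResponse // block until the match goroutine replies
	return resp.err
}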
// match main flow
func Start(match *SlotxxxMatch) {
defer func() {
| r := recover(); r != nil {
bytes := debug.Stack()
fmt.Println("ERROR ERROR ERROR: ", r, string(bytes))
}
}()
defer func() {
match.game.mutex.Lock()
delete(match.game.mapPlayerIdToMatch, match.player.Id())
match.game.mutex.Unlock()
}()
// _________________________________________________________________________
// _________________________________________________________________________
match.mutex.Lock()
match.phase = PHASE_1_SPIN
match.slotxxxResult = RandomSpin()
// test hit jackpot
// goodHands := [][][]string{
// [][]string{[]string{"Ac"}, []string{"As"}, []string{"Ah"}},
// [][]string{[]string{"Ad"}, []string{"Ac"}, []string{"As"}},
// [][]string{[]string{"Ah"}, []string{"Ad"}, []string{"Ac"}},
// [][]string{[]string{"8d"}, []string{"7d"}, []string{"9d"}},
// [][]string{[]string{"4h"}, []string{"6h"}, []string{"5h"}},
// [][]string{[]string{"3s"}, []string{"As"}, []string{"2s"}},
// [][]string{[]string{"8s"}, []string{"8c"}, []string{"8d"}},
// [][]string{[]string{"6s"}, []string{"6c"}, []string{"6d"}},
// [][]string{[]string{"9s"}, []string{"9c"}, []string{"9d"}},
// [][]string{[]string{"Ad"}, []string{"4c"}, []string{"5d"}},
// [][]string{[]string{"As"}, []string{"Ad"}, []string{"8c"}},
// [][]string{[]string{"7h"}, []string{"2c"}, []string{"Ad"}},
// }
// if rand.Intn(1) < 1 {
// match.slotxxxResult = goodHands[rand.Intn(len(goodHands))]
// }
//
match.playerResult.SlotxxxResult = match.slotxxxResult
match.playerResult.MapPaylineIndexToWonMoney, match.playerResult.MapPaylineIndexToIsWin, match.playerResult.MatchWonType = CalcWonMoneys(
match.slotxxxResult, match.payLineIndexs, match.moneyPerLine)
sumMoneyAfterSpin := CalcSumPay(match.playerResult.MapPaylineIndexToWonMoney)
match.mutex.Unlock()
match.updateMatchStatus()
time.Sleep(DURATION_PHASE_1_SPIN)
// add % to jackpot
var jackpotObj *jackpot.Jackpot
if match.moneyPerLine == MONEYS_PER_LINE[1] {
jackpotObj = match.game.jackpot100
} else if match.moneyPerLine == MONEYS_PER_LINE[2] {
jackpotObj = match.game.jackpot1000
} else if match.moneyPerLine == MONEYS_PER_LINE[3] {
jackpotObj = match.game.jackpot10000
} else {
// no jackpot is attached to this bet level; jackpotObj stays nil and is skipped below
}
if jackpotObj != nil {
temp := match.moneyPerLine * int64(len(match.payLineIndexs))
temp = int64(0.025 * float64(temp)) // add 2.5% of the total bet to the jackpot (per the author, 95% is repaid to users overall)
jackpotObj.AddMoney(temp)
if match.playerResult.MatchWonType == MATCH_WON_TYPE_JACKPOT {
amount := int64(float64(jackpotObj.Value()) * 0.5)
match.winningMoneyIfStop = amount
jackpotObj.AddMoney(-amount)
jackpotObj.NotifySomeoneHitJackpot(
match.GameCode(),
amount,
match.player.Id(),
match.player.Name(),
)
} else if sumMoneyAfterSpin > 0 {
match.winningMoneyIfStop = sumMoneyAfterSpin
match.currentXxxMoney = sumMoneyAfterSpin
match.requiredMoneyToGoOn = 0
// double-or-nothing (x2) loop; i is the level counter
i := 0
match.phase = PHASE_3_CHOOSE_GO_ON
for i < MAX_XXX_LEVEL {
if match.player.GetAvailableMoney(match.CurrencyType()) < match.currentXxxMoney {
break
}
match.currentXxxLevel = i
match.updateMatchStatus()
timer := time.After(DURATION_PHASE_3_CHOOSE_GO_ON)
var phase3choice string
select {
case <-timer:
phase3choice = ACTION_STOP_PLAYING
case phase3choice = <-match.ChanPhase3:
// the player's choice arrives on ChanPhase3 and is stored in phase3choice
}
if phase3choice == ACTION_STOP_PLAYING {
break
} else {
match.phase3result = Random1Card(phase3choice, i)
match.isRightPhase3 = false
var cardRank string
if len(match.phase3result) > 0 {
cardRank = string(match.phase3result[0])
}
if cardRank == "A" || cardRank == "2" || cardRank == "3" ||
cardRank == "4" || cardRank == "5" || cardRank == "6" {
if phase3choice == ACTION_SELECT_SMALL {
match.isRightPhase3 = true
}
} else if cardRank == "7" {
} else {
if phase3choice == ACTION_SELECT_BIG {
match.isRightPhase3 = true
}
}
isFirstTry := !match.is1stTryFailed[i]
if match | if | identifier_name |
upgrading.go |
func (this *UpgradingController) Get() {
//export
isExport, _ := this.GetInt("isExport", 0)
if isExport == 1 {
this.Export()
return
}
beego.Informational("query upgrading")
gId, _ := this.GetInt64("gameId", 0)
account := strings.TrimSpace(this.GetString("account"))
totalamount1 := strings.TrimSpace(this.GetString("totalamount"))
currentgift := strings.TrimSpace(this.GetString("CurrentGift"))
//the overall ranking is the sum of all weekly rankings; after fetching it, insert rows that are missing or different
page, err := this.GetInt("p")
if err != nil {
page = 1
}
limit, _ := beego.AppConfig.Int("pagelimit")
list, total := new(Upgrading).Paginate(page, limit, gId, account, totalamount1, currentgift)
pagination.SetPaginator(this.Ctx, limit, total)
this.Data["condArr"] = map[string]interface{}{
"account": account,
"totalamount": totalamount1,
"currentgift": currentgift}
this.Data["gameList"] = common.GetGames("upgrading")
this.Data["dataList"] = list
this.TplName = "gamedetail/upgrading/upgrading/index.tpl"
}
func (this *UpgradingController) Import() {
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
f, h, err := this.GetFile("file")
defer f.Close()
if err != nil {
beego.Error("upgradingWeek upload file get file error", err)
msg = "上传失败,请重试(1)"
return
}
fname := url.QueryEscape(h.Filename)
suffix := utils2.SubString(fname, len(fname), strings.LastIndex(fname, ".")-len(fname))
if suffix != ".xlsx" {
msg = "文件必须为 xlsx"
return
}
o := orm.NewOrm()
models := make([]Upgrading, 0)
//xlsx
xlsx, err := excelize.OpenReader(f)
if xlsx.GetSheetIndex("金管家总信息") == 0 {
msg = "不存在<<金管家总信息>>"
return
}
rows := xlsx.GetRows("金管家总信息")
for i, row := range rows {
if i == 0 {
continue
}
if len(row) < 7 {
msg = fmt.Sprintf("%s第%d行活动会员账号、有效投注不能为空<br>", msg, i+1)
continue
}
gameid, err := strconv.ParseInt(strings.TrimSpace(row[0]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
account := strings.TrimSpace(row[1])
if account == "" {
msg = fmt.Sprintf("%s第%d行会员账号不能为空<br>", msg, i+1)
}
total, err := strconv.ParseInt(strings.TrimSpace(row[2]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
level, err := strconv.ParseInt(strings.TrimSpace(row[3]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
leiji, err := strconv.ParseInt(strings.TrimSpace(row[4]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
/* week, err := strconv.ParseInt(strings.TrimSpace(row[5]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
month, err := strconv.ParseInt(strings.TrimSpace(row[6]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}*/
cha, err := strconv.ParseInt(strings.TrimSpace(row[7]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
//determine which week this record belongs to
model := Upgrading{}
model.GameId = gameid
model.Account = account
model.TotalAmount = total
model.Level = level
model.TotalGift = leiji
model.Balance = cha
model.Creator = this.LoginAdminId
model.Modifior = this.LoginAdminId
model.CreateDate = time.Now()
model.ModifyDate = time.Now()
model.Version = 0
models = append(models, model)
}
if msg != "" {
msg = fmt.Sprintf("请处理以下错误后再导入:<br>%s", msg)
return
}
if len(models) == 0 {
msg = "导入表格为空,请确认"
return
}
o.Begin()
var susNums int64
//split the slice and import in batches of 1000
mlen := len(models)
for i := 0; i <= mlen/1000; i++ {
end := 0
if (i+1)*1000 >= mlen {
end = mlen
} else {
end = (i + 1) * 1000
}
if i*1000 == end {
continue
}
tmpArr := models[i*1000 : end]
if nums, err := o.InsertMulti(len(tmpArr), tmpArr); err != nil {
o.Rollback()
beego.Error("upgradingweek import, insert error", err)
msg = "上传失败,请重试(2)"
return
} else {
susNums += nums
}
}
o.Commit()
code = 1
msg = fmt.Sprintf("成功导入%d条记录", susNums)
}
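// Sketch (assumed generic helper, not in the original): the 1000-row batching
// used in Import above, factored out. InsertMulti is beego orm's bulk insert.
func insertInBatches(o orm.Ormer, models []Upgrading, batch int) (int64, error) {
	var total int64
	for start := 0; start < len(models); start += batch {
		end := start + batch
		if end > len(models) {
			end = len(models) // final, possibly short, batch
		}
		n, err := o.InsertMulti(end-start, models[start:end])
		if err != nil {
			return total, err
		}
		total += n
	}
	return total, nil
}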
//compute the weekly salary
func (this *UpgradingController) CountWeek() {
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
//load the configuration entries
var udcs []UpgradingConfig
_, err := o.QueryTable(new(UpgradingConfig)).Filter("GameId", gId).Filter("WeekAmount__gt", 0).OrderBy("Level").All(&udcs)
if err != nil {
beego.Error("获取配置信息失败", err)
msg = "获取配置信息失败"
return
}
//load all overall-ranking rows
var ups []Upgrading
_, err1 := o.QueryTable(new(Upgrading)).Filter("GameId", gId).Filter("Level__gt", 1).Limit(-1).All(&ups)
if err1 != nil {
beego.Error("获取总榜信息失败", err1)
msg = "获取总榜信息失败"
return
}
var sum int64
for _, v := range ups {
for _, j := range udcs {
o.Begin()
if v.Level == j.Level {
if j.WeekAmount == 0 {
break
} else {
_, err := o.QueryTable(new(Upgrading)).Filter("Id", v.Id).Update(orm.Params{"WeekSalary": j.WeekAmount, "ModifyDate": time.Now()})
if err != nil {
beego.Error(v.Account, "更新周俸禄失败", err)
o.Rollback()
msg = "更新周俸禄失败"
return
}
sum += 1
break
}
}
}
}
code = 1
o.Commit()
msg = fmt.Sprintf("已成功更新%d个会员的周俸禄", sum)
}
//compute the monthly salary
func (this *UpgradingController) CountMonth() {
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
//load the configuration entries
var udcs []UpgradingConfig
_, err := o.QueryTable(new(UpgradingConfig)).Filter("GameId", gId).OrderBy("Level").All(&udcs)
if err != nil {
beego.Error("获取配置信息失败", err)
msg = "获取配置信息失败"
return
}
//load all overall-ranking rows
var ups []Upgrading
_, err1 := o.QueryTable(new(Upgrading)).Filter("GameId", gId).Filter("Level__gt", 0).Limit(-1).All(&ups)
if err1 != nil {
beego.Error("获取总榜信息失败", err1)
msg = "获取总 | {
this.EnableXSRF = false
} | identifier_body |
|
upgrading.go | .Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
//delete all existing rows, then re-insert
upgradingdel := Upgrading{GameId: gId}
_, err0 := o.Delete(&upgradingdel, "GameId")
if err0 != nil {
beego.Error("删除之前的所有记录失败", err0)
msg = "删除之前的所有记录失败"
return
}
models := make([]Upgrading, 0)
//idsDel := make([]int64, 0)
var totals []orm.ParamsList
//var upgrading Upgrading
_, err := o.Raw("SELECT account ,SUM(week_amount) from ph_upgrading_week where game_id = ? group by account", gId).ValuesList(&totals)
if err != nil {
beego.Error("查询总信息错误", err)
}
for _, v := range totals {
totalamount, _ := strconv.ParseInt(v[1].(string), 10, 64)
//var upgradingweeegift UpgradingWeek
//err := o.Raw("select a.* from ph_upgrading_week a where period = (select max(period) from ph_upgrading_week ) and account = ? ", v[0].(string)).QueryRow(&upgradingweeegift)
model := Upgrading{}
/*if err != nil {
model.CurrentGift = 0
} else {
model.CurrentGift = upgradingweeegift.RiseAmount
}*/
total1 := GetDetail(totalamount, gId)
//compute the shortfall to the next level
next := GetNextAmount(total1.Level)
balance := next - totalamount
model.Account = v[0].(string)
model.TotalAmount = totalamount
model.Level = total1.Level
model.TotalGift = GetTotalGift(total1.LevelGift, gId)
model.WeekSalary = total1.WeekAmount
if balance < 0 {
balance = 0
}
model.Balance = balance
model.GameId = gId
model.MonthSalary = total1.MonthAmount
model.Creator = this.LoginAdminId
model.Modifior = this.LoginAdminId
model.CreateDate = time.Now()
model.ModifyDate = time.Now()
model.Version = 0
models = append(models, model)
//}
}
o.Begin()
var susNums int64
//split the slice and import in batches of 1000
mlen := len(models)
if mlen > 0 {
for i := 0; i <= mlen/1000; i++ {
end := 0
if (i+1)*1000 >= mlen {
end = mlen
} else {
end = (i + 1) * 1000
}
if i*1000 == end {
continue
}
tmpArr := models[i*1000 : end]
if nums, err := o.InsertMulti(len(tmpArr), tmpArr); err != nil {
o.Rollback()
beego.Error("upgrading insert error", err)
return
} else {
susNums += nums
}
}
}
o.Commit()
code = 1
msg = "生成成功"
}
func (this *UpgradingController) Delbatch() {
beego.Informational("Delete batch")
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
model := Upgrading{GameId: gId}
if num, err := o.Delete(&model, "GameId"); err != nil {
beego.Error("Delete batch upgrading error", err)
msg = "删除失败"
} else {
code = 1
msg = fmt.Sprintf("成功删除%d条记录", num)
}
}
func (this *UpgradingController) Export() {
beego.Informational("export upgrading")
gId, _ := this.GetInt64("gameId", 0)
account := strings.TrimSpace(this.GetString("account"))
totalamount1 := strings.TrimSpace(this.GetString("totalamount"))
currentgift := strings.TrimSpace(this.GetString("CurrentGift"))
page := 1
limit := 1000
list, total := new(Upgrading).Paginate(page, limit, gId, account, totalamount1, currentgift)
totalInt := int(total)
if totalInt > limit {
//convert everything to float64 for the page calculation
page1 := (float64(totalInt) - float64(limit)) / float64(limit)
//math.Ceil rounds up by one whenever there is a remainder
page2 := int(math.Ceil(page1))
for page = 2; page <= (page2 + 1); page++ {
list1, _ := new(Upgrading).Paginate(page, limit, gId, account, totalamount1, currentgift)
for _, v := range list1 {
list = append(list, v)
}
}
}
gameName := utils.GetGameName(gId)
xlsx := excelize.NewFile()
xlsx.SetCellValue("Sheet1", "A1", "活动名称")
xlsx.SetCellValue("Sheet1", "B1", "会员账号")
xlsx.SetCellValue("Sheet1", "C1", "总投注额")
xlsx.SetCellValue("Sheet1", "D1", "当前等级")
xlsx.SetCellValue("Sheet1", "E1", "累计晋级彩金")
xlsx.SetCellValue("Sheet1", "F1", "周俸禄")
xlsx.SetCellValue("Sheet1", "G1", "月俸禄")
xlsx.SetCellValue("Sheet1", "H1", "距离晋级需投注金额")
for i, value := range list {
xlsx.SetCellValue("Sheet1", fmt.Sprintf("A%d", i+2), gameName)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("B%d", i+2), value.Account)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("C%d", i+2), value.TotalAmount)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("D%d", i+2), value.Level)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("E%d", i+2), value.TotalGift)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("F%d", i+2), value.WeekSalary)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("G%d", i+2), value.MonthSalary)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("H%d", i+2), value.Balance)
}
fileName := fmt.Sprintf("./tmp/excel/upgradinglist_%s.xlsx", time.Now().Format("20060102150405"))
err := xlsx.SaveAs(fileName)
if err != nil {
beego.Error("Export reward error", err.Error())
} else {
defer os.Remove(fileName)
this.Ctx.Output.Download(fileName)
}
}
type UpgradingEditController struct {
sysmanage.BaseController
}
func (this *UpgradingEditController) Get() {
id, _ := this.GetInt64("id")
o := orm.NewOrm()
upgrading := Upgrading{BaseModel: BaseModel{Id: id}}
err := o.Read(&upgrading)
if err == orm.ErrNoRows || err == orm.ErrMissPK {
this.Redirect(beego.URLFor("UpgradingController.get"), 302)
} else {
this.Data["data"] = upgrading
this.Data["xsrfdata"] = template.HTML(this.XSRFFormHTML())
this.TplName = "gamedetail/upgrading/upgrading/edit.tpl"
}
}
func (this *UpgradingEditController) Post() {
var code int
var msg string
var url = beego.URLFor("UpgradingController.Get")
defer sysmanage.Retjson(this.Ctx, &msg, &code, &url)
upgrading := Upgrading{}
if err := this.ParseForm(&upgrading); err != nil {
msg = "参数异常"
return
}
cols := []string{"TotalAmount", "Level", "TotalGift", "WeekSalary", "MonthSalary", "Balance"}
upgrading.Modifior = this.LoginAdminId
_, err1 := upgrading.Update(cols...)
if err1 != nil {
msg = "更新失败"
beego.Error("更新upgrading失败", err1)
} else {
code = 1
msg = "更新成功"
}
}
//get the cumulative promotion bonus
func GetTotalGift(amount int64, gid int64) int64 {
var num int64
upgradingconfigs := GetUpgradingConfigs(gid)
for _, v := range upgradingconfigs {
if v.LevelGift < amount {
num += v.LevelGift
}
}
return num + amount
}
//get the total bet required for the next level
func GetNextAmount(level int64) (nextamount int64) {
var upgradingconfig UpgradingConfig
o := orm.NewOrm()
o.QueryTable(new(UpgradingConfig)).Filter("Level", level+1).One(&upgradingconfig)
return upgradingconfig.TotalAmount | } | random_line_split |
|
upgrading.go | json(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
//delete all existing rows, then re-insert
upgradingdel := Upgrading{GameId: gId}
_, err0 := o.Delete(&upgradingdel, "GameId")
if err0 != nil {
beego.Error("删除之前的所有记录失败", err0)
msg = "删除之前的所有记录失败"
return
}
models := make([]Upgrading, 0)
//idsDel := make([]int64, 0)
var totals []orm.ParamsList
//var upgrading Upgrading
_, err := o.Raw("SELECT account ,SUM(week_amount) from ph_upgrading_week where game_id = ? group by account", gId).ValuesList(&totals)
if err != nil {
beego.Error("查询总信息错误", err)
}
for _, v := range totals {
totalamount, _ := strconv.ParseInt(v[1].(string), 10, 64)
//var upgradingweeegift UpgradingWeek
//err := o.Raw("select a.* from ph_upgrading_week a where period = (select max(period) from ph_upgrading_week ) and account = ? ", v[0].(string)).QueryRow(&upgradingweeegift)
model := Upgrading{}
/*if err != nil {
model.CurrentGift = 0
} else {
model.CurrentGift = upgradingweeegift.RiseAmount
}*/
total1 := GetDetail(totalamount, gId)
//compute the shortfall to the next level
next := GetNextAmount(total1.Level)
balance := next - totalamount
model.Account = v[0].(string)
model.TotalAmount = totalamount
model.Level = total1.Level
model.TotalGift = GetTotalGift(total1.LevelGift, gId)
model.WeekSalary = total1.WeekAmount
if balance < 0 {
balance = 0
}
model.Balance = balance
model.GameId = gId
model.MonthSalary = total1.MonthAmount
model.Creator = this.LoginAdminId
model.Modifior = this.LoginAdminId
model.CreateDate = time.Now()
model.ModifyDate = time.Now()
model.Version = 0
models = append(models, model)
//}
}
o.Begin()
var susNums int64
//split the slice and import in batches of 1000
mlen := len(models)
if mlen > 0 {
for i := 0; i <= mlen/1000; i++ {
end := 0
if (i+1)*1000 >= mlen {
end = mlen
} else {
end = (i + 1) * 1000
}
if i*1000 == end {
continue
}
tmpArr := models[i*1000 : end]
if nums, err := o.InsertMulti(len(tmpArr), tmpArr); err != nil {
o.Rollback()
beego.Error("upgrading insert error", err)
return
} else {
susNums += nums
}
}
}
o.Commit()
code = 1
msg = "生成成功"
}
func (this *UpgradingController) Delbatch() {
beego.Informational("Delete batch")
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
model := Upgrading{GameId: gId}
if num, err := o.Delete(&model, "GameId"); err != nil {
beego.Error("Delete batch upgrading error", err)
msg = "删除失败"
} else {
code = 1
msg = fmt.Sprintf("成功删除%d条记录", num)
}
}
func (this *UpgradingController) Export() {
beego.Informational("export upgrading")
gId, _ := this.GetInt64("gameId", 0)
account := strings.TrimSpace(this.GetString("account"))
totalamount1 := strings.TrimSpace(this.GetString("totalamount"))
currentgift := strings.TrimSpace(this.GetString("CurrentGift"))
page := 1
limit := 1000
list, total := new(Upgrading).Paginate(page, limit, gId, account, totalamount1, currentgift)
totalInt := int(total)
if totalInt > limit {
//convert everything to float64 for the page calculation
page1 := (float64(totalInt) - float64(limit)) / float64(limit)
//math.Ceil rounds up by one whenever there is a remainder
page2 := int(math.Ceil(page1))
for page = 2; page <= (page2 + 1); page++ {
list1, _ := new(Upgrading).Paginate(page, limit, gId, account, totalamount1, currentgift)
for _, v := range list1 {
list = append(list, v)
}
}
}
gameName := utils.GetGameName(gId)
xlsx := excelize.NewFile()
xlsx.SetCellValue("Sheet1", "A1", "活动名称")
xlsx.SetCellValue("Sheet1", "B1", "会员账号")
xlsx.SetCellValue("Sheet1", "C1", "总投注额")
xlsx.SetCellValue("Sheet1", "D1", "当前等级")
xlsx.SetCellValue("Sheet1", "E1", "累计晋级彩金")
xlsx.SetCellValue("Sheet1", "F1", "周俸禄")
xlsx.SetCellValue("Sheet1", "G1", "月俸禄")
xlsx.SetCellValue("Sheet1", "H1", "距离晋级需投注金额")
for i, value := range list {
xlsx.SetCellValue("Sheet1", fmt.Sprintf("A%d", i+2), gameName)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("B%d", i+2), value.Account)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("C%d", i+2), value.TotalAmount)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("D%d", i+2), value.Level)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("E%d", i+2), value.TotalGift)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("F%d", i+2), value.WeekSalary)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("G%d", i+2), value.MonthSalary)
xlsx.SetCellValue("Sheet1", fmt.Sprintf("H%d", i+2), value.Balance)
}
fileName := fmt.Sprintf("./tmp/excel/upgradinglist_%s.xlsx", time.Now().Format("20060102150405"))
err := xlsx.SaveAs(fileName)
if err != nil {
beego.Error("Export reward error", err.Error())
} else {
defer os.Remove(fileName)
this.Ctx.Output.Download(fileName)
}
}
type UpgradingEditController struct {
sysmanage.BaseController
}
func (this *UpgradingEditController) Get() {
id, _ := this.GetInt64("id")
o := orm.NewOrm()
upgrading := Upgrading{BaseModel: BaseModel{Id: id}}
err := o.Read(&upgrading)
if err == orm.ErrNoRows || err == orm.ErrMissPK {
this.Redirect(beego.URLFor("UpgradingController.get"), 302)
} else {
this.Data["data"] = upgrading
this.Data["xsrfdata"] = template.HTML(this.XSRFFormHTML())
this.TplName = "gamedetail/upgrading/upgrading/edit.tpl"
}
}
func (this *UpgradingEditController) Post() {
var code int
var msg string
var url = beego.URLFor("UpgradingController.Get")
defer sysmanage.Retjson(this.Ctx, &msg, &code, &url)
upgrading := Upgrading{}
if err := this.ParseForm(&upgrading); err != nil {
msg = "参数异常"
return
}
cols := []string{"TotalAmount", "Level", "TotalGift", "WeekSalary", "MonthSalary", "Balance"}
upgrading.Modifior = this.LoginAdminId
_, err1 := upgrading.Update(cols...)
if err1 != nil {
msg = "更新失败"
beego.Error("更新upgrading失败", err1)
} else {
code = 1
msg = "更新成功"
}
}
//get the cumulative promotion bonus
func GetTotalGift(amount int64, gid int64) int64 {
var num int64
upgradingconfigs := GetUpgradingConfigs(gid)
for _, v := range upgradingconfigs {
if v.LevelGift < amount {
num += v.LevelGift
}
}
return num + amount
}
//get the total bet required for the next level
func GetNextAmount(level int64) (nextamount int64) {
var upgradingconfig UpgradingConfig
o := orm.NewOrm()
o.QueryTable(new(UpgradingConfig)).Filter("Level", level+1).One(&upgradingconfig)
return upgradingconfig.TotalAmount
}
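// Sketch (assumed helper, not in the original): the shortfall logic used in
// CreateTotal, expressed with GetNextAmount; negative shortfalls clamp to 0
// exactly as the inline code does.
func promotionShortfall(totalAmount int64, level int64) int64 {
	next := GetNextAmount(level) // bet required to reach the next level
	if balance := next - totalAmount; balance > 0 {
		return balance
	}
	return 0
}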
| identifier_name |
||
upgrading.go | }
gameid, err := strconv.ParseInt(strings.TrimSpace(row[0]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
account := strings.TrimSpace(row[1])
if account == "" {
msg = fmt.Sprintf("%s第%d行会员账号不能为空<br>", msg, i+1)
}
total, err := strconv.ParseInt(strings.TrimSpace(row[2]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
level, err := strconv.ParseInt(strings.TrimSpace(row[3]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
leiji, err := strconv.ParseInt(strings.TrimSpace(row[4]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
/* week, err := strconv.ParseInt(strings.TrimSpace(row[5]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
month, err := strconv.ParseInt(strings.TrimSpace(row[6]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}*/
cha, err := strconv.ParseInt(strings.TrimSpace(row[7]), 10, 64)
if err != nil {
msg = fmt.Sprintf("%s第%d行有效投注必须为数字<br>", msg, i+1)
continue
}
//determine which week this record belongs to
model := Upgrading{}
model.GameId = gameid
model.Account = account
model.TotalAmount = total
model.Level = level
model.TotalGift = leiji
model.Balance = cha
model.Creator = this.LoginAdminId
model.Modifior = this.LoginAdminId
model.CreateDate = time.Now()
model.ModifyDate = time.Now()
model.Version = 0
models = append(models, model)
}
if msg != "" {
msg = fmt.Sprintf("请处理以下错误后再导入:<br>%s", msg)
return
}
if len(models) == 0 {
msg = "导入表格为空,请确认"
return
}
o.Begin()
var susNums int64
//split the slice and import in batches of 1000
mlen := len(models)
for i := 0; i <= mlen/1000; i++ {
end := 0
if (i+1)*1000 >= mlen {
end = mlen
} else {
end = (i + 1) * 1000
}
if i*1000 == end {
continue
}
tmpArr := models[i*1000 : end]
if nums, err := o.InsertMulti(len(tmpArr), tmpArr); err != nil {
o.Rollback()
beego.Error("upgradingweek import, insert error", err)
msg = "上传失败,请重试(2)"
return
} else {
susNums += nums
}
}
o.Commit()
code = 1
msg = fmt.Sprintf("成功导入%d条记录", susNums)
}
//compute the weekly salary
func (this *UpgradingController) CountWeek() {
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
//load the configuration entries
var udcs []UpgradingConfig
_, err := o.QueryTable(new(UpgradingConfig)).Filter("GameId", gId).Filter("WeekAmount__gt", 0).OrderBy("Level").All(&udcs)
if err != nil {
beego.Error("获取配置信息失败", err)
msg = "获取配置信息失败"
return
}
//load all overall-ranking rows
var ups []Upgrading
_, err1 := o.QueryTable(new(Upgrading)).Filter("GameId", gId).Filter("Level__gt", 1).Limit(-1).All(&ups)
if err1 != nil {
beego.Error("获取总榜信息失败", err1)
msg = "获取总榜信息失败"
return
}
var sum int64
for _, v := range ups {
for _, j := range udcs {
o.Begin()
if v.Level == j.Level {
if j.WeekAmount == 0 {
break
} else {
_, err := o.QueryTable(new(Upgrading)).Filter("Id", v.Id).Update(orm.Params{"WeekSalary": j.WeekAmount, "ModifyDate": time.Now()})
if err != nil {
beego.Error(v.Account, "更新周俸禄失败", err)
o.Rollback()
msg = "更新周俸禄失败"
return
}
sum += 1
break
}
}
}
}
code = 1
o.Commit()
msg = fmt.Sprintf("已成功更新%d个会员的周俸禄", sum)
}
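// Sketch (assumed refactoring, not in the original): CountWeek above rescans
// the config slice for every member; a map keyed by level makes each lookup O(1):
func weekSalaryByLevel(udcs []UpgradingConfig) map[int64]int64 {
	m := make(map[int64]int64, len(udcs))
	for _, c := range udcs {
		if c.WeekAmount > 0 { // only levels that actually pay a weekly salary
			m[c.Level] = c.WeekAmount
		}
	}
	return m
}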
//compute the monthly salary
func (this *UpgradingController) CountMonth() {
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
//load the configuration entries
var udcs []UpgradingConfig
_, err := o.QueryTable(new(UpgradingConfig)).Filter("GameId", gId).OrderBy("Level").All(&udcs)
if err != nil {
beego.Error("获取配置信息失败", err)
msg = "获取配置信息失败"
return
}
//load all overall-ranking rows
var ups []Upgrading
_, err1 := o.QueryTable(new(Upgrading)).Filter("GameId", gId).Filter("Level__gt", 0).Limit(-1).All(&ups)
if err1 != nil {
beego.Error("获取总榜信息失败", err1)
msg = "获取总榜信息失败"
return
}
//count how many members were updated successfully
var sum int64
for _, v := range ups {
for _, j := range udcs {
o.Begin()
if v.Level == j.Level {
if j.MonthAmount == 0 {
break
} else {
_, err := o.QueryTable(new(Upgrading)).Filter("Id", v.Id).Update(orm.Params{"MonthSalary": j.MonthAmount, "ModifyDate": time.Now()})
if err != nil {
beego.Error(v.Acco |
break
}
}
}
}
code = 1
o.Commit()
msg = fmt.Sprintf("已成功更新%d个会员的月俸禄", sum)
}
func (this *UpgradingController) CreateTotal() {
var code int
var msg string
defer sysmanage.Retjson(this.Ctx, &msg, &code)
gId, _ := this.GetInt64("gameId", 0)
o := orm.NewOrm()
//delete all existing rows, then re-insert
upgradingdel := Upgrading{GameId: gId}
_, err0 := o.Delete(&upgradingdel, "GameId")
if err0 != nil {
beego.Error("删除之前的所有记录失败", err0)
msg = "删除之前的所有记录失败"
return
}
models := make([]Upgrading, 0)
//idsDel := make([]int64, 0)
var totals []orm.ParamsList
//var upgrading Upgrading
_, err := o.Raw("SELECT account ,SUM(week_amount) from ph_upgrading_week where game_id = ? group by account", gId).ValuesList(&totals)
if err != nil {
beego.Error("查询总信息错误", err)
}
for _, v := range totals {
totalamount, _ := strconv.ParseInt(v[1].(string), 10, 64)
//var upgradingweeegift UpgradingWeek
//err := o.Raw("select a.* from ph_upgrading_week a where period = (select max(period) from ph_upgrading_week ) and account = ? ", v[0].(string)).QueryRow(&upgradingweeegift)
model := Upgrading{}
/*if err != nil {
model.CurrentGift = 0
} else {
model.CurrentGift = upgradingweeegift.RiseAmount
}*/
total1 := GetDetail(totalamount, g | unt, "更新月俸禄失败", err)
o.Rollback()
msg = "更新月俸禄失败"
return
}
sum += 1 | conditional_block |
legacy_message.go | then this might be a proto3 empty message
// from before the size cache was added. If there are any fields, check to
// see that at least one of them looks like something we generated.
if t.Elem().Kind() == reflect.Struct {
if nfield := t.Elem().NumField(); nfield > 0 {
hasProtoField := false
for i := 0; i < nfield; i++ {
f := t.Elem().Field(i)
if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
hasProtoField = true
break
}
}
if !hasProtoField {
return aberrantLoadMessageDesc(t, name)
}
}
}
md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
for _, i := range idxs[1:] {
md = md.Messages().Get(i)
}
if name != "" && md.FullName() != name {
panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name))
}
if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok {
return md.(protoreflect.MessageDescriptor)
}
return md
}
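// Sketch (not in the original): the idxs returned by Descriptor() form a path
// of nested-message indices, which the loop above walks; factored out:
func descriptorAtPath(fd protoreflect.FileDescriptor, idxs []int) protoreflect.MessageDescriptor {
	md := fd.Messages().Get(idxs[0]) // top-level message
	for _, i := range idxs[1:] {
		md = md.Messages().Get(i) // descend into nested declarations
	}
	return md
}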
var (
aberrantMessageDescLock sync.Mutex
aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor
)
// aberrantLoadMessageDesc returns a MessageDescriptor derived from the Go type,
// which must not implement protoreflect.ProtoMessage or messageV1.
//
// This is a best-effort derivation of the message descriptor using the protobuf
// tags on the struct fields.
func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor {
aberrantMessageDescLock.Lock()
defer aberrantMessageDescLock.Unlock()
if aberrantMessageDescCache == nil {
aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor)
}
return aberrantLoadMessageDescReentrant(t, name)
}
func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor {
// Fast-path: check if a MessageDescriptor is cached for this concrete type.
if md, ok := aberrantMessageDescCache[t]; ok {
return md
}
// Slow-path: construct a descriptor from the Go struct type (best-effort).
// Cache the MessageDescriptor early on so that we can resolve internal
// cyclic references.
md := &filedesc.Message{L2: new(filedesc.MessageL2)}
md.L0.FullName = aberrantDeriveMessageName(t, name)
md.L0.ParentFile = filedesc.SurrogateProto2
aberrantMessageDescCache[t] = md
if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
return md
}
// Try to determine if the message is using proto3 by checking scalars.
for i := 0; i < t.Elem().NumField(); i++ {
f := t.Elem().Field(i)
if tag := f.Tag.Get("protobuf"); tag != "" {
switch f.Type.Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
md.L0.ParentFile = filedesc.SurrogateProto3
}
for _, s := range strings.Split(tag, ",") {
if s == "proto3" {
md.L0.ParentFile = filedesc.SurrogateProto3
}
}
}
}
// Obtain a list of oneof wrapper types.
var oneofWrappers []reflect.Type
for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
if fn, ok := t.MethodByName(method); ok {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
if vs, ok := v.Interface().([]interface{}); ok {
for _, v := range vs {
oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
}
}
}
}
}
// Obtain a list of the extension ranges.
if fn, ok := t.MethodByName("ExtensionRangeArray"); ok {
vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0]
for i := 0; i < vs.Len(); i++ {
v := vs.Index(i)
md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{
protoreflect.FieldNumber(v.FieldByName("Start").Int()),
protoreflect.FieldNumber(v.FieldByName("End").Int() + 1),
})
md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil)
}
}
// Derive the message fields by inspecting the struct fields.
for i := 0; i < t.Elem().NumField(); i++ {
f := t.Elem().Field(i)
if tag := f.Tag.Get("protobuf"); tag != "" {
tagKey := f.Tag.Get("protobuf_key")
tagVal := f.Tag.Get("protobuf_val")
aberrantAppendField(md, f.Type, tag, tagKey, tagVal)
}
if tag := f.Tag.Get("protobuf_oneof"); tag != "" {
n := len(md.L2.Oneofs.List)
md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{})
od := &md.L2.Oneofs.List[n]
od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
od.L0.ParentFile = md.L0.ParentFile
od.L0.Parent = md
od.L0.Index = n
for _, t := range oneofWrappers {
if t.Implements(f.Type) {
f := t.Elem().Field(0)
if tag := f.Tag.Get("protobuf"); tag != "" {
aberrantAppendField(md, f.Type, tag, "", "")
fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
fd.L1.ContainingOneof = od
od.L1.Fields.List = append(od.L1.Fields.List, fd)
}
}
}
}
}
return md
}
func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName {
if name.IsValid() {
return name
}
func() {
defer func() { recover() }() // swallow possible nil panics
if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok {
name = protoreflect.FullName(m.XXX_MessageName())
}
}()
if name.IsValid() {
return name
}
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return AberrantDeriveFullName(t)
}
func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) {
t := goType
isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
if isOptional || isRepeated {
t = t.Elem()
}
fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field)
// Append field descriptor to the message.
n := len(md.L2.Fields.List)
md.L2.Fields.List = append(md.L2.Fields.List, *fd)
fd = &md.L2.Fields.List[n]
fd.L0.FullName = md.FullName().Append(fd.Name())
fd.L0.ParentFile = md.L0.ParentFile
fd.L0.Parent = md
fd.L0.Index = n
if fd.L1.IsWeak || fd.L1.HasPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
if fd.L1.IsWeak {
opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
}
if fd.L1.HasPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
}
return opts.Interface()
}
}
// Populate Enum and Message.
if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind {
switch v := reflect.Zero(t).Interface().(type) {
case protoreflect.Enum:
fd.L1.Enum = v.Descriptor()
default:
fd.L1.Enum = LegacyLoadEnumDesc(t)
}
}
if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) {
switch v := reflect.Zero(t).Interface().(type) {
case protoreflect.ProtoMessage:
fd.L1.Message = v.ProtoReflect().Descriptor()
case messageV1: | fd.L1.Message = LegacyLoadMessageDesc(t)
default:
if t.Kind() == reflect.Map { | random_line_split |
|
legacy_message.go | .ProtoMessage); ok {
panic(fmt.Sprintf("%v already implements proto.Message", t))
}
mdV1, ok := mv.(messageV1)
if !ok {
return aberrantLoadMessageDesc(t, name)
}
// If this is a dynamic message type where there isn't a 1-1 mapping between
// Go and protobuf types, calling the Descriptor method on the zero value of
// the message type isn't likely to work. If it panics, swallow the panic and
// continue as if the Descriptor method wasn't present.
b, idxs := func() ([]byte, []int) {
defer func() {
recover()
}()
return mdV1.Descriptor()
}()
if b == nil {
return aberrantLoadMessageDesc(t, name)
}
// If the Go type has no fields, then this might be a proto3 empty message
// from before the size cache was added. If there are any fields, check to
// see that at least one of them looks like something we generated.
if t.Elem().Kind() == reflect.Struct {
if nfield := t.Elem().NumField(); nfield > 0 {
hasProtoField := false
for i := 0; i < nfield; i++ {
f := t.Elem().Field(i)
if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
hasProtoField = true
break
}
}
if !hasProtoField {
return aberrantLoadMessageDesc(t, name)
}
}
}
md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
for _, i := range idxs[1:] {
md = md.Messages().Get(i)
}
if name != "" && md.FullName() != name {
panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name))
}
if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok {
return md.(protoreflect.MessageDescriptor)
}
return md
}
var (
aberrantMessageDescLock sync.Mutex
aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor
)
// aberrantLoadMessageDesc returns a MessageDescriptor derived from the Go type,
// which must not implement protoreflect.ProtoMessage or messageV1.
//
// This is a best-effort derivation of the message descriptor using the protobuf
// tags on the struct fields.
func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor {
aberrantMessageDescLock.Lock()
defer aberrantMessageDescLock.Unlock()
if aberrantMessageDescCache == nil {
aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor)
}
return aberrantLoadMessageDescReentrant(t, name)
}
func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor {
// Fast-path: check if a MessageDescriptor is cached for this concrete type.
if md, ok := aberrantMessageDescCache[t]; ok {
return md
}
// Slow-path: construct a descriptor from the Go struct type (best-effort).
// Cache the MessageDescriptor early on so that we can resolve internal
// cyclic references.
md := &filedesc.Message{L2: new(filedesc.MessageL2)}
md.L0.FullName = aberrantDeriveMessageName(t, name)
md.L0.ParentFile = filedesc.SurrogateProto2
aberrantMessageDescCache[t] = md
if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
return md
}
// Try to determine if the message is using proto3 by checking scalars.
for i := 0; i < t.Elem().NumField(); i++ {
f := t.Elem().Field(i)
if tag := f.Tag.Get("protobuf"); tag != "" {
switch f.Type.Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
md.L0.ParentFile = filedesc.SurrogateProto3
}
for _, s := range strings.Split(tag, ",") {
if s == "proto3" {
md.L0.ParentFile = filedesc.SurrogateProto3
}
}
}
}
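// Illustrative example (not taken from any generated file): a struct field
// declared as
//	F int32 `protobuf:"varint,1,opt,name=f,proto3"`
// trips both checks above -- its Go kind is a bare (non-pointer) scalar, and
// its tag carries the "proto3" token -- so the surrogate parent file is
// marked as proto3.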
// Obtain a list of oneof wrapper types.
var oneofWrappers []reflect.Type
for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
if fn, ok := t.MethodByName(method); ok {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
if vs, ok := v.Interface().([]interface{}); ok {
for _, v := range vs {
oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
}
}
}
}
}
// Obtain a list of the extension ranges.
if fn, ok := t.MethodByName("ExtensionRangeArray"); ok {
vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0]
for i := 0; i < vs.Len(); i++ {
v := vs.Index(i)
md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{
protoreflect.FieldNumber(v.FieldByName("Start").Int()),
protoreflect.FieldNumber(v.FieldByName("End").Int() + 1),
})
md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil)
}
}
// Derive the message fields by inspecting the struct fields.
for i := 0; i < t.Elem().NumField(); i++ {
f := t.Elem().Field(i)
if tag := f.Tag.Get("protobuf"); tag != "" {
tagKey := f.Tag.Get("protobuf_key")
tagVal := f.Tag.Get("protobuf_val")
aberrantAppendField(md, f.Type, tag, tagKey, tagVal)
}
if tag := f.Tag.Get("protobuf_oneof"); tag != "" {
n := len(md.L2.Oneofs.List)
md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{})
od := &md.L2.Oneofs.List[n]
od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
od.L0.ParentFile = md.L0.ParentFile
od.L0.Parent = md
od.L0.Index = n
for _, t := range oneofWrappers {
if t.Implements(f.Type) {
f := t.Elem().Field(0)
if tag := f.Tag.Get("protobuf"); tag != "" {
aberrantAppendField(md, f.Type, tag, "", "")
fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
fd.L1.ContainingOneof = od
od.L1.Fields.List = append(od.L1.Fields.List, fd)
}
}
}
}
}
return md
}
func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName {
if name.IsValid() {
return name
}
func() {
defer func() { recover() }() // swallow possible nil panics
if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok {
name = protoreflect.FullName(m.XXX_MessageName())
}
}()
if name.IsValid() {
return name
}
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return AberrantDeriveFullName(t)
}
func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) {
t := goType
isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
if isOptional || isRepeated {
t = t.Elem()
}
fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field)
// Append field descriptor to the message.
n := len(md.L2.Fields.List)
md.L2.Fields.List = append(md.L2.Fields.List, *fd)
fd = &md.L2.Fields.List[n]
fd.L0.FullName = md.FullName().Append(fd.Name())
fd.L0.ParentFile = md.L0.ParentFile
fd.L0.Parent = md
fd.L0.Index = n
if fd.L1.IsWeak || fd.L1.HasPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
if fd.L1.IsWeak {
opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
}
if fd.L1.HasPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
}
return opts.Interface()
}
}

legacy_message.go
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
if vs, ok := v.Interface().([]interface{}); ok {
for _, v := range vs {
oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
}
}
}
}
}
// Obtain a list of the extension ranges.
if fn, ok := t.MethodByName("ExtensionRangeArray"); ok {
vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0]
for i := 0; i < vs.Len(); i++ {
v := vs.Index(i)
md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{
protoreflect.FieldNumber(v.FieldByName("Start").Int()),
protoreflect.FieldNumber(v.FieldByName("End").Int() + 1),
})
md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil)
}
}
// Derive the message fields by inspecting the struct fields.
for i := 0; i < t.Elem().NumField(); i++ {
f := t.Elem().Field(i)
if tag := f.Tag.Get("protobuf"); tag != "" {
tagKey := f.Tag.Get("protobuf_key")
tagVal := f.Tag.Get("protobuf_val")
aberrantAppendField(md, f.Type, tag, tagKey, tagVal)
}
if tag := f.Tag.Get("protobuf_oneof"); tag != "" {
n := len(md.L2.Oneofs.List)
md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{})
od := &md.L2.Oneofs.List[n]
od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
od.L0.ParentFile = md.L0.ParentFile
od.L0.Parent = md
od.L0.Index = n
for _, t := range oneofWrappers {
if t.Implements(f.Type) {
f := t.Elem().Field(0)
if tag := f.Tag.Get("protobuf"); tag != "" {
aberrantAppendField(md, f.Type, tag, "", "")
fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
fd.L1.ContainingOneof = od
od.L1.Fields.List = append(od.L1.Fields.List, fd)
}
}
}
}
}
return md
}
func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName {
if name.IsValid() {
return name
}
func() {
defer func() { recover() }() // swallow possible nil panics
if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok {
name = protoreflect.FullName(m.XXX_MessageName())
}
}()
if name.IsValid() {
return name
}
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return AberrantDeriveFullName(t)
}
func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) {
t := goType
isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
if isOptional || isRepeated {
t = t.Elem()
}
fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field)
// Append field descriptor to the message.
n := len(md.L2.Fields.List)
md.L2.Fields.List = append(md.L2.Fields.List, *fd)
fd = &md.L2.Fields.List[n]
fd.L0.FullName = md.FullName().Append(fd.Name())
fd.L0.ParentFile = md.L0.ParentFile
fd.L0.Parent = md
fd.L0.Index = n
if fd.L1.IsWeak || fd.L1.HasPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
if fd.L1.IsWeak {
opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
}
if fd.L1.HasPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
}
return opts.Interface()
}
}
// Populate Enum and Message.
if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind {
switch v := reflect.Zero(t).Interface().(type) {
case protoreflect.Enum:
fd.L1.Enum = v.Descriptor()
default:
fd.L1.Enum = LegacyLoadEnumDesc(t)
}
}
if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) {
switch v := reflect.Zero(t).Interface().(type) {
case protoreflect.ProtoMessage:
fd.L1.Message = v.ProtoReflect().Descriptor()
case messageV1:
fd.L1.Message = LegacyLoadMessageDesc(t)
default:
if t.Kind() == reflect.Map {
n := len(md.L1.Messages.List)
md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)})
md2 := &md.L1.Messages.List[n]
md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name()))))
md2.L0.ParentFile = md.L0.ParentFile
md2.L0.Parent = md
md2.L0.Index = n
md2.L1.IsMapEntry = true
md2.L2.Options = func() protoreflect.ProtoMessage {
opts := descopts.Message.ProtoReflect().New()
opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true))
return opts.Interface()
}
aberrantAppendField(md2, t.Key(), tagKey, "", "")
aberrantAppendField(md2, t.Elem(), tagVal, "", "")
fd.L1.Message = md2
break
}
fd.L1.Message = aberrantLoadMessageDescReentrant(t, "")
}
}
}
type placeholderEnumValues struct {
protoreflect.EnumValueDescriptors
}
func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n)))
}
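// For example, ByNumber(3) returns a placeholder descriptor whose full name
// is "UNKNOWN_3". It seems to exist only so that tag unmarshaling (see the
// placeholderEnumValues argument to ptag.Unmarshal above) has something to
// point at before a real enum descriptor is resolved.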
// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder.
type legacyMarshaler interface {
Marshal() ([]byte, error)
}
// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder.
type legacyUnmarshaler interface {
Unmarshal([]byte) error
}
// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder.
type legacyMerger interface {
Merge(protoiface.MessageV1)
}
var aberrantProtoMethods = &protoiface.Methods{
Marshal: legacyMarshal,
Unmarshal: legacyUnmarshal,
Merge: legacyMerge,
// We have no way to tell whether the type's Marshal method
// supports deterministic serialization or not, but this
// preserves the v1 implementation's behavior of always
// calling Marshal methods when present.
Flags: protoiface.SupportMarshalDeterministic,
}
func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
v := in.Message.(unwrapper).protoUnwrap()
marshaler, ok := v.(legacyMarshaler)
if !ok {
return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v)
}
out, err := marshaler.Marshal()
if in.Buf != nil {
out = append(in.Buf, out...)
}
return protoiface.MarshalOutput{
Buf: out,
}, err
}
func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
v := in.Message.(unwrapper).protoUnwrap()
unmarshaler, ok := v.(legacyUnmarshaler)
if !ok {
return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
}
return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
}
func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput {
// Check whether this supports the legacy merger.
dstv := in.Destination.(unwrapper).protoUnwrap()
merger, ok := dstv.(legacyMerger)
if ok {
merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
// If legacy merger is unavailable, implement merge in terms of
// a marshal and unmarshal operation.
srcv := in.Source.(unwrapper).protoUnwrap()
marshaler, ok := srcv.(legacyMarshaler)
if !ok {
return protoiface.MergeOutput{}
}
dstv = in.Destination.(unwrapper).protoUnwrap()
unmarshaler, ok := dstv.(legacyUnmarshaler)
if !ok {
return protoiface.MergeOutput{}
}
if !in.Source.IsValid() {
// Legacy Marshal methods may not function on nil messages.
// Check for a typed nil source only after we confirm that
// legacy Marshal/Unmarshal methods are present, for
// consistency.
return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
b, err := marshaler.Marshal()
if err != nil {
return protoiface.MergeOutput{}
}
err = unmarshaler.Unmarshal(b)
if err != nil {
return protoiface.MergeOutput{}
}
return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}

legacy_message.go
name = protoreflect.FullName(m.XXX_MessageName())
}
}()
if name.IsValid() {
return name
}
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return AberrantDeriveFullName(t)
}
func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) {
t := goType
isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
if isOptional || isRepeated {
t = t.Elem()
}
fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field)
// Append field descriptor to the message.
n := len(md.L2.Fields.List)
md.L2.Fields.List = append(md.L2.Fields.List, *fd)
fd = &md.L2.Fields.List[n]
fd.L0.FullName = md.FullName().Append(fd.Name())
fd.L0.ParentFile = md.L0.ParentFile
fd.L0.Parent = md
fd.L0.Index = n
if fd.L1.IsWeak || fd.L1.HasPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
if fd.L1.IsWeak {
opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
}
if fd.L1.HasPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
}
return opts.Interface()
}
}
// Populate Enum and Message.
if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind {
switch v := reflect.Zero(t).Interface().(type) {
case protoreflect.Enum:
fd.L1.Enum = v.Descriptor()
default:
fd.L1.Enum = LegacyLoadEnumDesc(t)
}
}
if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) {
switch v := reflect.Zero(t).Interface().(type) {
case protoreflect.ProtoMessage:
fd.L1.Message = v.ProtoReflect().Descriptor()
case messageV1:
fd.L1.Message = LegacyLoadMessageDesc(t)
default:
if t.Kind() == reflect.Map {
n := len(md.L1.Messages.List)
md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)})
md2 := &md.L1.Messages.List[n]
md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name()))))
md2.L0.ParentFile = md.L0.ParentFile
md2.L0.Parent = md
md2.L0.Index = n
md2.L1.IsMapEntry = true
md2.L2.Options = func() protoreflect.ProtoMessage {
opts := descopts.Message.ProtoReflect().New()
opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true))
return opts.Interface()
}
aberrantAppendField(md2, t.Key(), tagKey, "", "")
aberrantAppendField(md2, t.Elem(), tagVal, "", "")
fd.L1.Message = md2
break
}
fd.L1.Message = aberrantLoadMessageDescReentrant(t, "")
}
}
}
type placeholderEnumValues struct {
protoreflect.EnumValueDescriptors
}
func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n)))
}
// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder.
type legacyMarshaler interface {
Marshal() ([]byte, error)
}
// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder.
type legacyUnmarshaler interface {
Unmarshal([]byte) error
}
// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder.
type legacyMerger interface {
Merge(protoiface.MessageV1)
}
var aberrantProtoMethods = &protoiface.Methods{
Marshal: legacyMarshal,
Unmarshal: legacyUnmarshal,
Merge: legacyMerge,
// We have no way to tell whether the type's Marshal method
// supports deterministic serialization or not, but this
// preserves the v1 implementation's behavior of always
// calling Marshal methods when present.
Flags: protoiface.SupportMarshalDeterministic,
}
func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
v := in.Message.(unwrapper).protoUnwrap()
marshaler, ok := v.(legacyMarshaler)
if !ok {
return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v)
}
out, err := marshaler.Marshal()
if in.Buf != nil {
out = append(in.Buf, out...)
}
return protoiface.MarshalOutput{
Buf: out,
}, err
}
func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
v := in.Message.(unwrapper).protoUnwrap()
unmarshaler, ok := v.(legacyUnmarshaler)
if !ok {
return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
}
return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
}
func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput {
// Check whether this supports the legacy merger.
dstv := in.Destination.(unwrapper).protoUnwrap()
merger, ok := dstv.(legacyMerger)
if ok {
merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
// If legacy merger is unavailable, implement merge in terms of
// a marshal and unmarshal operation.
srcv := in.Source.(unwrapper).protoUnwrap()
marshaler, ok := srcv.(legacyMarshaler)
if !ok {
return protoiface.MergeOutput{}
}
dstv = in.Destination.(unwrapper).protoUnwrap()
unmarshaler, ok := dstv.(legacyUnmarshaler)
if !ok {
return protoiface.MergeOutput{}
}
if !in.Source.IsValid() {
// Legacy Marshal methods may not function on nil messages.
// Check for a typed nil source only after we confirm that
// legacy Marshal/Unmarshal methods are present, for
// consistency.
return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
b, err := marshaler.Marshal()
if err != nil {
return protoiface.MergeOutput{}
}
err = unmarshaler.Unmarshal(b)
if err != nil {
return protoiface.MergeOutput{}
}
return protoiface.MergeOutput{Flags: protoiface.MergeComplete}
}
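// Note: the fallback above is effectively dst.Unmarshal(src.Marshal()).
// Unmarshaling wire-format bytes into an existing message appends to it
// rather than replacing it, which is what gives this round trip its merge
// semantics.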
// aberrantMessageType implements MessageType for all types other than pointer-to-struct.
type aberrantMessageType struct {
t reflect.Type
}
func (mt aberrantMessageType) New() protoreflect.Message {
if mt.t.Kind() == reflect.Ptr {
return aberrantMessage{reflect.New(mt.t.Elem())}
}
return aberrantMessage{reflect.Zero(mt.t)}
}
func (mt aberrantMessageType) Zero() protoreflect.Message {
return aberrantMessage{reflect.Zero(mt.t)}
}
func (mt aberrantMessageType) GoType() reflect.Type {
return mt.t
}
func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor {
return LegacyLoadMessageDesc(mt.t)
}
// aberrantMessage implements Message for all types other than pointer-to-struct.
//
// When the underlying type implements legacyMarshaler or legacyUnmarshaler,
// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is
// not much that can be done with values of this type.
type aberrantMessage struct {
v reflect.Value
}
// Reset implements the v1 proto.Message.Reset method.
func (m aberrantMessage) Reset() {
if mr, ok := m.v.Interface().(interface{ Reset() }); ok {
mr.Reset()
return
}
if m.v.Kind() == reflect.Ptr && !m.v.IsNil() {
m.v.Elem().Set(reflect.Zero(m.v.Type().Elem()))
}
}
func (m aberrantMessage) ProtoReflect() protoreflect.Message {
return m
}
func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor {
return LegacyLoadMessageDesc(m.v.Type())
}
func (m aberrantMessage) Type() protoreflect.MessageType {
return aberrantMessageType{m.v.Type()}
}
func (m aberrantMessage) New() protoreflect.Message {
if m.v.Type().Kind() == reflect.Ptr {
return aberrantMessage{reflect.New(m.v.Type().Elem())}
}
return aberrantMessage{reflect.Zero(m.v.Type())}
}
func (m aberrantMessage) Interface() protoreflect.ProtoMessage {
return m
}
func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
return
}
func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool {
return false
}
func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) {
panic("invalid field descriptor")
}

install.rs
reset_partition_table(&config, &mut dest, &mut *table, &saved)?;
}
// return a generic error so our exit status is right
bail!("install failed");
}
// Because grub picks /boot by label and the OS picks /boot, we can end up racing/flapping
// between /boot partitions on startup. So check the number of filesystems labeled 'boot'
// and warn the user if there is more than one
match get_filesystems_with_label("boot", true) {
Ok(pts) => {
if pts.len() > 1 {
let rootdev = fs::canonicalize(device)
.unwrap_or_else(|_| PathBuf::from(device))
.to_string_lossy()
.to_string();
let pts = pts
.iter()
.filter(|pt| !pt.contains(&rootdev))
.collect::<Vec<_>>();
eprintln!("\nNote: detected other devices with a filesystem labeled `boot`:");
for pt in pts {
eprintln!(" - {pt}");
}
eprintln!("The installed OS may not work correctly if there are multiple boot filesystems.
Before rebooting, investigate whether these filesystems are needed and consider
wiping them with `wipefs -a`.\n"
);
}
}
Err(e) => eprintln!("checking filesystems labeled 'boot': {e:?}"),
}
eprintln!("Install complete.");
Ok(())
}
fn parse_partition_filters(labels: &[&str], indexes: &[&str]) -> Result<Vec<PartitionFilter>> {
use PartitionFilter::*;
let mut filters: Vec<PartitionFilter> = Vec::new();
// partition label globs
for glob in labels {
let filter = Label(
glob::Pattern::new(glob)
.with_context(|| format!("couldn't parse label glob '{glob}'"))?,
);
filters.push(filter);
}
// partition index ranges
let parse_index = |i: &str| -> Result<Option<NonZeroU32>> {
match i {
"" => Ok(None), // open end of range
_ => Ok(Some(
NonZeroU32::new(
i.parse()
.with_context(|| format!("couldn't parse partition index '{i}'"))?,
)
.context("partition index cannot be zero")?,
)),
}
};
for range in indexes {
let parts: Vec<&str> = range.split('-').collect();
let filter = match parts.len() {
1 => Index(parse_index(parts[0])?, parse_index(parts[0])?),
2 => Index(parse_index(parts[0])?, parse_index(parts[1])?),
_ => bail!("couldn't parse partition index range '{}'", range),
};
match filter {
Index(None, None) => bail!(
"both ends of partition index range '{}' cannot be open",
range
),
Index(Some(x), Some(y)) if x > y => bail!(
"start of partition index range '{}' cannot be greater than end",
range
),
_ => filters.push(filter),
};
}
Ok(filters)
}
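// A quick illustration of the accepted forms (see also the unit test later
// in this file): "boot*" parses to a Label glob, "4" to Index(Some(4), Some(4)),
// "2-4" to Index(Some(2), Some(4)), and "4-" to the open-ended
// Index(Some(4), None); "-" alone is rejected because both ends of the range
// would be open.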
fn ensure_exclusive_access(device: &str) -> Result<()> {
let mut parts = Disk::new(device)?.get_busy_partitions()?;
if parts.is_empty() {
return Ok(());
}
parts.sort_unstable_by_key(|p| p.path.to_string());
eprintln!("Partitions in use on {device}:");
for part in parts {
if let Some(mountpoint) = part.mountpoint.as_ref() {
eprintln!(" {} mounted on {}", part.path, mountpoint);
}
if part.swap {
eprintln!(" {} is swap device", part.path);
}
for holder in part.get_holders()? {
eprintln!(" {} in use by {}", part.path, holder);
}
}
bail!("found busy partitions");
}
/// Copy the image source to the target disk and do all post-processing.
/// If this function fails, the caller should wipe the partition table
/// to ensure the user doesn't boot from a partially-written disk.
fn write_disk(
config: &InstallConfig,
source: &mut ImageSource,
dest: &mut File,
table: &mut dyn PartTable,
saved: &SavedPartitions,
ignition: Option<File>,
network_config: Option<&str>,
) -> Result<()> {
let device = config.dest_device.as_deref().expect("device missing");
// Get sector size of destination, for comparing with image
let sector_size = get_sector_size(dest)?;
// copy the image
#[allow(clippy::match_bool, clippy::match_single_binding)]
let image_copy = match is_dasd(device, Some(dest))? {
#[cfg(target_arch = "s390x")]
true => s390x::image_copy_s390x,
_ => image_copy_default,
};
write_image(
source,
dest,
Path::new(device),
image_copy,
true,
Some(saved),
Some(sector_size),
VerifyKeys::Production,
)?;
table.reread()?;
// postprocess
if ignition.is_some()
|| config.firstboot_args.is_some()
|| !config.append_karg.is_empty()
|| !config.delete_karg.is_empty()
|| config.platform.is_some()
|| !config.console.is_empty()
|| network_config.is_some()
|| cfg!(target_arch = "s390x")
{
let mount = Disk::new(device)?.mount_partition_by_label("boot", mount::MsFlags::empty())?;
if let Some(ignition) = ignition.as_ref() {
write_ignition(mount.mountpoint(), &config.ignition_hash, ignition)
.context("writing Ignition configuration")?;
}
if let Some(platform) = config.platform.as_ref() {
write_platform(mount.mountpoint(), platform).context("writing platform ID")?;
}
if config.platform.is_some() || !config.console.is_empty() {
write_console(
mount.mountpoint(),
config.platform.as_deref(),
&config.console,
)
.context("configuring console")?;
}
if let Some(firstboot_args) = config.firstboot_args.as_ref() {
write_firstboot_kargs(mount.mountpoint(), firstboot_args)
.context("writing firstboot kargs")?;
}
if !config.append_karg.is_empty() || !config.delete_karg.is_empty() {
eprintln!("Modifying kernel arguments");
Console::maybe_warn_on_kargs(&config.append_karg, "--append-karg", "--console");
visit_bls_entry_options(mount.mountpoint(), |orig_options: &str| {
KargsEditor::new()
.append(config.append_karg.as_slice())
.delete(config.delete_karg.as_slice())
.maybe_apply_to(orig_options)
})
.context("deleting and appending kargs")?;
}
if let Some(network_config) = network_config.as_ref() {
copy_network_config(mount.mountpoint(), network_config)?;
}
#[cfg(target_arch = "s390x")]
{
s390x::zipl(
mount.mountpoint(),
None,
None,
s390x::ZiplSecexMode::Disable,
None,
)?;
s390x::chreipl(device)?;
}
}
// detect any latent write errors
dest.sync_all().context("syncing data to disk")?;
Ok(())
}
/// Write the Ignition config.
fn write_ignition(
mountpoint: &Path,
digest_in: &Option<IgnitionHash>,
mut config_in: &File,
) -> Result<()> {
eprintln!("Writing Ignition config");
// Verify configuration digest, if any.
if let Some(digest) = &digest_in {
digest
.validate(&mut config_in)
.context("failed to validate Ignition configuration digest")?;
config_in
.rewind()
.context("rewinding Ignition configuration file")?;
};
// make parent directory
let mut config_dest = mountpoint.to_path_buf();
config_dest.push("ignition");
if !config_dest.is_dir() {
fs::create_dir_all(&config_dest).with_context(|| {
format!(
"creating Ignition config directory {}",
config_dest.display()
)
})?;
// Ignition data may contain secrets; restrict to root
fs::set_permissions(&config_dest, Permissions::from_mode(0o700)).with_context(|| {
format!(
"setting file mode for Ignition directory {}",
config_dest.display()
)
})?;
}
// do the copy
config_dest.push("config.ign");
let mut config_out = OpenOptions::new()
.write(true)
.create_new(true)
.open(&config_dest)
.with_context(|| {
format!(
"opening destination Ignition config {}",
config_dest.display()
)
})?;
// Ignition config may contain secrets; restrict to root
fs::set_permissions(&config_dest, Permissions::from_mode(0o600)).with_context(|| {
format!(
"setting file mode for destination Ignition config {}",
config_dest.display()
)
})?;
io::copy(&mut config_in, &mut config_out).context("writing Ignition config")?;
Ok(())
}
/// Write first-boot kernel arguments.
fn write_firstboot_kargs(mountpoint: &Path, args: &str) -> Result<()> {

install.rs
let mut config_dest = mountpoint.to_path_buf();
config_dest.push("ignition.firstboot");
// if the file doesn't already exist, fail, since our assumptions
// are wrong
let mut config_out = OpenOptions::new()
.append(true)
.open(&config_dest)
.with_context(|| format!("opening first-boot file {}", config_dest.display()))?;
let contents = format!("set ignition_network_kcmdline=\"{args}\"\n");
config_out
.write_all(contents.as_bytes())
.context("writing first-boot kernel arguments")?;
Ok(())
}
#[derive(Clone, Default, Deserialize)]
struct PlatformSpec {
#[serde(default)]
grub_commands: Vec<String>,
#[serde(default)]
kernel_arguments: Vec<String>,
}
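// Illustrative shape of an entry in coreos/platforms.json (hypothetical
// values; the real table ships inside the image):
// { "qemu": { "grub_commands": ["serial --speed=115200"],
//             "kernel_arguments": ["console=ttyS0,115200n8"] } }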
/// Override the platform ID.
fn write_platform(mountpoint: &Path, platform: &str) -> Result<()> {
// early return if setting the platform to the default value, since
// otherwise we'll think we failed to set it
if platform == "metal" {
return Ok(());
}
eprintln!("Setting platform to {platform}");
// We assume that we will only install from metal images and that the
// bootloader configs will always set ignition.platform.id.
visit_bls_entry_options(mountpoint, |orig_options: &str| {
let new_options = KargsEditor::new()
.replace(&[format!("ignition.platform.id=metal={platform}")])
.apply_to(orig_options)
.context("setting platform ID argument")?;
if orig_options == new_options {
bail!("couldn't locate platform ID");
}
Ok(Some(new_options))
})?;
Ok(())
}
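// For instance (illustrative): installing with --platform qemu rewrites the
// BLS entry karg "ignition.platform.id=metal" to "ignition.platform.id=qemu".
// The "key=old=new" string built above appears to be KargsEditor's
// replacement syntax.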
/// Configure console kernel arguments and GRUB commands.
fn write_console(mountpoint: &Path, platform: Option<&str>, consoles: &[Console]) -> Result<()> {
// read platforms table
let platforms = match fs::read_to_string(mountpoint.join("coreos/platforms.json")) {
Ok(json) => serde_json::from_str::<HashMap<String, PlatformSpec>>(&json)
.context("parsing platform table")?,
// no table for this image?
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Default::default(),
Err(e) => return Err(e).context("reading platform table"),
};
let mut kargs = Vec::new();
let mut grub_commands = Vec::new();
if !consoles.is_empty() {
// custom console settings completely override platform-specific
// defaults
let mut grub_terminals = Vec::new();
for console in consoles {
kargs.push(console.karg());
if let Some(cmd) = console.grub_command() {
grub_commands.push(cmd);
}
grub_terminals.push(console.grub_terminal());
}
grub_terminals.sort_unstable();
grub_terminals.dedup();
for direction in ["input", "output"] {
grub_commands.push(format!("terminal_{direction} {}", grub_terminals.join(" ")));
}
} else if let Some(platform) = platform {
// platform-specific defaults
if platform == "metal" {
// we're just being asked to apply the defaults which are already
// applied
return Ok(());
}
let spec = platforms.get(platform).cloned().unwrap_or_default();
kargs.extend(spec.kernel_arguments);
grub_commands.extend(spec.grub_commands);
} else {
// nothing to do and the caller shouldn't have called us
unreachable!();
}
// set kargs, removing any metal-specific ones
let metal_spec = platforms.get("metal").cloned().unwrap_or_default();
visit_bls_entry_options(mountpoint, |orig_options: &str| {
KargsEditor::new()
.append(&kargs)
.delete(&metal_spec.kernel_arguments)
.maybe_apply_to(orig_options)
.context("setting platform kernel arguments")
})?;
// set grub commands
if grub_commands != metal_spec.grub_commands {
let path = mountpoint.join("grub2/grub.cfg");
let grub_cfg = fs::read_to_string(&path).context("reading grub.cfg")?;
let new_grub_cfg = update_grub_cfg_console_settings(&grub_cfg, &grub_commands)
.context("updating grub.cfg")?;
fs::write(&path, new_grub_cfg).context("writing grub.cfg")?;
}
Ok(())
}
/// Rewrite the grub.cfg CONSOLE-SETTINGS block to use the specified GRUB
/// commands, and return the result.
fn update_grub_cfg_console_settings(grub_cfg: &str, commands: &[String]) -> Result<String> {
let mut new_commands = commands.join("\n");
if !new_commands.is_empty() {
new_commands.push('\n');
}
let re = Regex::new(GRUB_CFG_CONSOLE_SETTINGS_RE).unwrap();
if !re.is_match(grub_cfg) {
bail!("missing substitution marker in grub.cfg");
}
Ok(re
.replace(grub_cfg, |caps: &Captures| {
format!(
"{}{}{}",
caps.name("prefix").expect("didn't match prefix").as_str(),
new_commands,
caps.name("suffix").expect("didn't match suffix").as_str()
)
})
.into_owned())
}
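// Sketch of the effect (hypothetical input, since the actual pattern lives
// in GRUB_CFG_CONSOLE_SETTINGS_RE): everything between the "prefix" and
// "suffix" capture groups is replaced by `commands` joined with newlines, so
// passing ["terminal_input serial", "terminal_output serial"] leaves exactly
// those two lines inside the marked block.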
/// Copy networking config if asked to do so
fn copy_network_config(mountpoint: &Path, net_config_src: &str) -> Result<()> {
eprintln!("Copying networking configuration from {net_config_src}");
// get the path to the destination directory
let net_config_dest = mountpoint.join("coreos-firstboot-network");
// make the directory if it doesn't exist
fs::create_dir_all(&net_config_dest).with_context(|| {
format!(
"creating destination networking config directory {}",
net_config_dest.display()
)
})?;
// copy files from source to destination directories
for entry in fs::read_dir(net_config_src)
.with_context(|| format!("reading directory {net_config_src}"))?
{
let entry = entry.with_context(|| format!("reading directory {net_config_src}"))?;
let srcpath = entry.path();
let destpath = net_config_dest.join(entry.file_name());
if srcpath.is_file() {
eprintln!("Copying {} to installed system", srcpath.display());
fs::copy(&srcpath, destpath).context("Copying networking config")?;
}
}
Ok(())
}
/// Clear the partition table and restore saved partitions. For use after
/// a failure.
fn reset_partition_table(
config: &InstallConfig,
dest: &mut File,
table: &mut dyn PartTable,
saved: &SavedPartitions,
) -> Result<()> {
eprintln!("Resetting partition table");
let device = config.dest_device.as_deref().expect("device missing");
if is_dasd(device, Some(dest))? {
// Don't write out a GPT, since the backup GPT may overwrite
// something we're not allowed to touch. Just clear the first MiB
// of disk.
dest.rewind().context("seeking to start of disk")?;
let zeroes = [0u8; 1024 * 1024];
dest.write_all(&zeroes)
.context("clearing primary partition table")?;
} else {
// Write a new GPT including any saved partitions.
saved
.overwrite(dest)
.context("restoring saved partitions")?;
}
// Finish writeback and reread the partition table.
dest.sync_all().context("syncing partition table to disk")?;
table.reread()?;
Ok(())
}
// Preserve saved partitions by writing them to a file in /tmp and reporting
// the path.
fn stash_saved_partitions(disk: &mut File, saved: &SavedPartitions) -> Result<()> {
let mut stash = tempfile::Builder::new()
.prefix("coreos-installer-partitions.")
.tempfile()
.context("creating partition stash file")?;
let path = stash.path().to_owned();
eprintln!("Storing saved partition entries to {}", path.display());
let len = disk.seek(SeekFrom::End(0)).context("seeking disk")?;
stash
.as_file()
.set_len(len)
.with_context(|| format!("extending partition stash file {}", path.display()))?;
saved
.overwrite(stash.as_file_mut())
.with_context(|| format!("stashing saved partitions to {}", path.display()))?;
stash
.keep()
.with_context(|| format!("retaining saved partition stash in {}", path.display()))?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_partition_filters() {
use PartitionFilter::*;
let g = |v| Label(glob::Pattern::new(v).unwrap());
let i = |v| Some(NonZeroU32::new(v).unwrap());
assert_eq!(
parse_partition_filters(&["foo", "z*b?", ""], &["1", "7-7", "2-4", "-3", "4-"])
.unwrap(),
vec![
g("foo"),
g("z*b?"),
g(""),
Index(i(1), i(1)),
Index(i(7), i(7)),
Index(i(2), i(4)),
Index(None, i(3)),
Index(i(4), None)
]
);
}
}

install.rs
.collect::<Vec<&str>>(),
&config
.save_partindex
.iter()
.map(|s| s.as_str())
.collect::<Vec<&str>>(),
)?;
// compute sector size
// Uninitialized ECKD DASD's blocksize is 512, but after formatting
// it changes to the recommended 4096
// https://bugzilla.redhat.com/show_bug.cgi?id=1905159
#[allow(clippy::match_bool, clippy::match_single_binding)]
let sector_size = match is_dasd(device, None)
.with_context(|| format!("checking whether {device} is an IBM DASD disk"))?
{
#[cfg(target_arch = "s390x")]
true => s390x::dasd_try_get_sector_size(device).transpose(),
_ => None,
};
let sector_size = sector_size
.unwrap_or_else(|| get_sector_size_for_path(Path::new(device)))
.with_context(|| format!("getting sector size of {device}"))?
.get();
// Set up DASD. We need to do this before initiating the download
// because otherwise the download might time out while we're low-level
// formatting the DASD.
#[cfg(target_arch = "s390x")]
{
if is_dasd(device, None)? {
if !save_partitions.is_empty() {
// The user requested partition saving, but SavedPartitions
// doesn't understand DASD VTOCs and won't find any partitions
// to save.
bail!("saving DASD partitions is not supported");
}
s390x::prepare_dasd(device)?;
}
}
// set up image source
// create location
let location: Box<dyn ImageLocation> = if let Some(image_file) = &config.image_file {
Box::new(FileLocation::new(image_file))
} else if let Some(image_url) = &config.image_url {
Box::new(UrlLocation::new(image_url, config.fetch_retries))
} else if config.offline {
match OsmetLocation::new(config.architecture.as_str(), sector_size)? {
Some(osmet) => Box::new(osmet),
None => bail!("cannot perform offline install; metadata missing"),
}
} else {
// For now, using --stream automatically will cause a download. In the future, we could
// opportunistically use osmet if the version and stream match an osmet file/the live ISO.
let maybe_osmet = match config.stream {
Some(_) => None,
None => OsmetLocation::new(config.architecture.as_str(), sector_size)?,
};
if let Some(osmet) = maybe_osmet {
Box::new(osmet)
} else {
let format = match sector_size {
4096 => "4k.raw.xz",
512 => "raw.xz",
n => {
// could bail on non-512, but let's be optimistic and just warn but try the regular
// 512b image
eprintln!(
"Found non-standard sector size {n} for {device}, assuming 512b-compatible"
);
"raw.xz"
}
};
Box::new(StreamLocation::new(
config.stream.as_deref().unwrap_or("stable"),
config.architecture.as_str(),
"metal",
format,
config.stream_base_url.as_ref(),
config.fetch_retries,
)?)
}
};
// report it to the user
eprintln!("{location}");
// we only support installing from a single artifact
let mut sources = location.sources()?;
let mut source = sources.pop().context("no artifacts found")?;
if !sources.is_empty() {
bail!("found multiple artifacts");
}
if source.signature.is_none() && location.require_signature() {
if config.insecure {
eprintln!("Signature not found; skipping verification as requested");
} else {
bail!("--insecure not specified and signature not found");
}
}
// open output; ensure it's a block device and we have exclusive access
let mut dest = OpenOptions::new()
.read(true)
.write(true)
.open(device)
.with_context(|| format!("opening {device}"))?;
if !dest
.metadata()
.with_context(|| format!("getting metadata for {device}"))?
.file_type()
.is_block_device()
{
bail!("{} is not a block device", device);
}
ensure_exclusive_access(device)
.with_context(|| format!("checking for exclusive access to {device}"))?;
// save partitions that we plan to keep
let saved = SavedPartitions::new_from_disk(&mut dest, &save_partitions)
.with_context(|| format!("saving partitions from {device}"))?;
// get reference to partition table
// For kpartx partitioning, this will conditionally call kpartx -d
// when dropped
let mut table = Disk::new(device)?
.get_partition_table()
.with_context(|| format!("getting partition table for {device}"))?;
// copy and postprocess disk image
// On failure, clear and reread the partition table to prevent the disk
// from accidentally being used.
dest.rewind().with_context(|| format!("seeking {device}"))?;
if let Err(err) = write_disk(
&config,
&mut source,
&mut dest,
&mut *table,
&saved,
ignition,
network_config,
) {
// log the error so the details aren't dropped if we encounter
// another error during cleanup
eprintln!("\nError: {err:?}\n");
// clean up
if config.preserve_on_error {
eprintln!("Preserving partition table as requested");
if saved.is_saved() {
// The user asked to preserve the damaged partition table
// for debugging. We also have saved partitions, and those
// may or may not be in the damaged table depending where we
// failed. Preserve the saved partitions by writing them to
// a file in /tmp and telling the user about it. Hey, it's
// a debug flag.
stash_saved_partitions(&mut dest, &saved)?;
}
} else {
reset_partition_table(&config, &mut dest, &mut *table, &saved)?;
}
// return a generic error so our exit status is right
bail!("install failed");
}
// Because grub picks /boot by label and the OS picks /boot, we can end up racing/flapping
// between /boot partitions on startup. So check the number of filesystems labeled 'boot'
// and warn the user if there is more than one
match get_filesystems_with_label("boot", true) {
Ok(pts) => {
if pts.len() > 1 {
let rootdev = fs::canonicalize(device)
.unwrap_or_else(|_| PathBuf::from(device))
.to_string_lossy()
.to_string();
let pts = pts
.iter()
.filter(|pt| !pt.contains(&rootdev))
.collect::<Vec<_>>();
eprintln!("\nNote: detected other devices with a filesystem labeled `boot`:");
for pt in pts {
eprintln!(" - {pt}");
}
eprintln!("The installed OS may not work correctly if there are multiple boot filesystems.
Before rebooting, investigate whether these filesystems are needed and consider
wiping them with `wipefs -a`.\n"
);
}
}
Err(e) => eprintln!("checking filesystems labeled 'boot': {e:?}"),
}
eprintln!("Install complete.");
Ok(())
}
fn parse_partition_filters(labels: &[&str], indexes: &[&str]) -> Result<Vec<PartitionFilter>> {
use PartitionFilter::*;
let mut filters: Vec<PartitionFilter> = Vec::new();
// partition label globs
for glob in labels {
let filter = Label(
glob::Pattern::new(glob)
.with_context(|| format!("couldn't parse label glob '{glob}'"))?,
);
filters.push(filter);
}
// partition index ranges
let parse_index = |i: &str| -> Result<Option<NonZeroU32>> {
match i {
"" => Ok(None), // open end of range
_ => Ok(Some(
NonZeroU32::new(
i.parse()
.with_context(|| format!("couldn't parse partition index '{i}'"))?,
)
.context("partition index cannot be zero")?,
)),
}
};
for range in indexes {
let parts: Vec<&str> = range.split('-').collect();
let filter = match parts.len() {
1 => Index(parse_index(parts[0])?, parse_index(parts[0])?),
2 => Index(parse_index(parts[0])?, parse_index(parts[1])?),
_ => bail!("couldn't parse partition index range '{}'", range),
};
match filter {
Index(None, None) => bail!(
"both ends of partition index range '{}' cannot be open",
range
),
Index(Some(x), Some(y)) if x > y => bail!(
"start of partition index range '{}' cannot be greater than end",
range
),
|
lib.rs
#[macro_use]
extern crate lazy_static;
extern crate num_cpus;
#[macro_use]
extern crate state_machine_future;
extern crate tokio_core;
extern crate tokio_timer;
extern crate void;
#[macro_use]
pub mod js_native;
mod error;
mod future_ext;
pub mod gc_roots;
pub(crate) mod js_global;
pub mod promise_future_glue;
pub(crate) mod promise_tracker;
pub(crate) mod task;
pub use error::*;
use futures::{Sink, Stream};
use futures::sync::mpsc;
use futures_cpupool::CpuPool;
use std::cmp;
use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::path;
use std::sync::Arc;
use std::thread;
/// Configuration options for building a Starling event loop.
///
/// ```
/// extern crate starling;
///
/// # fn foo() -> starling::Result<()> {
/// // Construct a new `Options` builder, providing the file containing
/// // the main JavaScript task.
/// starling::Options::new("path/to/main.js")
/// // Finish configuring the `Options` builder and run the event
/// // loop!
/// .run()?;
/// # Ok(())
/// # }
/// ```
#[derive(Clone, Debug)]
pub struct Options {
main: path::PathBuf,
sync_io_pool_threads: usize,
cpu_pool_threads: usize,
channel_buffer_size: usize,
}
const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8;
const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096;
impl Options {
/// Construct a new `Options` object for configuring the Starling event
/// loop.
///
/// The given `main` JavaScript file will be evaluated as the main task.
pub fn new<P>(main: P) -> Options
where
P: Into<path::PathBuf>,
{
Options {
main: main.into(),
sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS,
cpu_pool_threads: num_cpus::get(),
channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE,
}
}
/// Configure the number of threads to reserve for the synchronous IO pool.
///
/// The synchronous IO pool is a collection of threads for adapting
/// synchronous IO libraries into the (otherwise completely asynchronous)
/// Starling system.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn sync_io_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.sync_io_pool_threads = threads;
self
}
/// Configure the number of threads to reserve for the CPU pool.
///
/// The CPU pool is a collection of worker threads for CPU-bound native Rust
/// tasks.
///
/// Defaults to the number of logical CPUs on the machine.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn cpu_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.cpu_pool_threads = threads;
self
}
/// Configure the size of mpsc buffers in the system.
///
/// ### Panics
///
/// Panics if `size` is 0.
pub fn channel_buffer_size(mut self, size: usize) -> Self {
assert!(size > 0);
self.channel_buffer_size = size;
self
}
/// Finish this `Options` builder and run the Starling event loop with its
/// specified configuration.
pub fn run(self) -> Result<()> {
Starling::new(self)?.run()
}
}
impl Options {
// Get the number of `T`s that should be buffered in an mpsc channel for the
// current configuration.
fn buffer_capacity_for<T>(&self) -> usize {
let size_of_t = cmp::max(1, mem::size_of::<T>());
let capacity = self.channel_buffer_size / size_of_t;
cmp::max(1, capacity)
}
}
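// Worked example (illustrative arithmetic only): with the default
// channel_buffer_size of 4096 bytes, a 64-byte message type gets
// 4096 / 64 = 64 buffered slots, while a zero-sized type is clamped to a
// size of 1 and gets 4096 slots.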
/// The Starling supervisory thread.
///
/// The supervisory thread doesn't do much other than supervise other threads: the IO
/// event loop thread, various utility thread pools, and JavaScript task
/// threads. Its primary responsibility is ensuring clean system shutdown and
/// joining thread handles.
pub(crate) struct Starling {
handle: StarlingHandle,
receiver: mpsc::Receiver<StarlingMessage>,
// Currently there is a 1:1 mapping between JS tasks and native
// threads. That is expected to change in the future, hence the
// distinction between `self.tasks` and `self.threads`.
tasks: HashMap<task::TaskId, task::TaskHandle>,
threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>,
}
impl fmt::Debug for Starling {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Starling {{ .. }}")
}
}
impl Starling {
/// Construct a Starling system from the given options.
pub fn new(opts: Options) -> Result<Starling> {
let tasks = HashMap::new();
let threads = HashMap::new();
let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>());
let Options {
sync_io_pool_threads,
cpu_pool_threads,
..
} = opts;
let handle = StarlingHandle {
options: Arc::new(opts),
sync_io_pool: CpuPool::new(sync_io_pool_threads),
cpu_pool: CpuPool::new(cpu_pool_threads),
sender,
};
Ok(Starling {
handle,
receiver,
tasks,
threads,
})
}
/// Run the main Starling event loop with the specified options.
pub fn run(mut self) -> Result<()> {
let (main, thread) =
task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?;
self.tasks.insert(main.id(), main.clone());
self.threads.insert(thread.thread().id(), thread);
for msg in self.receiver.wait() {
let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?;
match msg {
StarlingMessage::TaskFinished(id) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the finished task");
join_handle
.join()
.expect("should join finished task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Ok(());
}
}
StarlingMessage::TaskErrored(id, error) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the errored task");
join_handle
.join()
.expect("should join errored task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Err(error);
}
}
StarlingMessage::NewTask(task, join_handle) => {
self.tasks.insert(task.id(), task);
self.threads.insert(join_handle.thread().id(), join_handle);
}
}
}
Ok(())
}
}
/// Messages that threads can send to the Starling supervisory thread.
///
/// This needs to be `pub` because it is used in a trait implementation; don't
/// actually use it!
#[derive(Debug)]
#[doc(hidden)]
pub enum StarlingMessage {
/// The task on the given thread completed successfully.
TaskFinished(task::TaskId),
/// The task on the given thread failed with the given error.
TaskErrored(task::TaskId, Error),
/// A new child task was created.
NewTask(task::TaskHandle, thread::JoinHandle<()>),
}
/// A handle to the Starling system.
///
/// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn
/// work in one of the utility thread pools, and communicate with the Starling
/// supervisory thread. Handles can be cloned and sent across threads,
/// propagating these capabilities.
#[derive(Clone)]
pub(crate) struct StarlingHandle {
options: Arc<Options>,
sync_io_pool: CpuPool,
cpu_pool: CpuPool,
sender: mpsc::Sender<StarlingMessage>,
}
impl fmt::Debug for StarlingHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "StarlingHandle {{ .. }}")
}
}
impl StarlingHandle {
/// Get the `Options` that this Starling system was configured with.
pub fn options(&self) -> &Arc<Options> {
&self.options
}
/// Get a handle to the thread pool for adapting synchronous IO (perhaps
/// from a library that wasn't written to be async) into the system.
pub fn sync_io_pool(&self) -> &CpuPool {
&self.sync_io_pool
}
/// Get a handle to the thread pool for performing CPU-bound native Rust
/// tasks.
pub fn cpu_pool(&self) -> &CpuPool {
&self.cpu_pool
}
/// Send a message to the Starling supervisory thread.

lib.rs
/// starling::Options::new("path/to/main.js")
/// // Finish configuring the `Options` builder and run the event
/// // loop!
/// .run()?;
/// # Ok(())
/// # }
/// ```
#[derive(Clone, Debug)]
pub struct Options {
main: path::PathBuf,
sync_io_pool_threads: usize,
cpu_pool_threads: usize,
channel_buffer_size: usize,
}
const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8;
const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096;
impl Options {
/// Construct a new `Options` object for configuring the Starling event
/// loop.
///
/// The given `main` JavaScript file will be evaluated as the main task.
pub fn new<P>(main: P) -> Options
where
P: Into<path::PathBuf>,
{
Options {
main: main.into(),
sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS,
cpu_pool_threads: num_cpus::get(),
channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE,
}
}
/// Configure the number of threads to reserve for the synchronous IO pool.
///
/// The synchronous IO pool is a collection of threads for adapting
/// synchronous IO libraries into the (otherwise completely asynchronous)
/// Starling system.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn sync_io_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.sync_io_pool_threads = threads;
self
}
/// Configure the number of threads to reserve for the CPU pool.
///
/// The CPU pool is a collection of worker threads for CPU-bound native Rust
/// tasks.
///
/// Defaults to the number of logical CPUs on the machine.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn cpu_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.cpu_pool_threads = threads;
self
}
/// Configure the size of mpsc buffers in the system.
///
/// ### Panics
///
/// Panics if `size` is 0.
pub fn channel_buffer_size(mut self, size: usize) -> Self {
assert!(size > 0);
self.channel_buffer_size = size;
self
}
/// Finish this `Options` builder and run the Starling event loop with its
/// specified configuration.
pub fn run(self) -> Result<()> {
Starling::new(self)?.run()
}
}
impl Options {
// Get the number of `T`s that should be buffered in an mpsc channel for the
// current configuration.
fn buffer_capacity_for<T>(&self) -> usize {
let size_of_t = cmp::max(1, mem::size_of::<T>());
let capacity = self.channel_buffer_size / size_of_t;
cmp::max(1, capacity)
}
}
/// The Starling supervisory thread.
///
/// The supervisory thread doesn't do much other than supervise other threads: the IO
/// event loop thread, various utility thread pools, and JavaScript task
/// threads. Its primary responsibility is ensuring clean system shutdown and
/// joining thread handles.
pub(crate) struct Starling {
handle: StarlingHandle,
receiver: mpsc::Receiver<StarlingMessage>,
// Currently there is a 1:1 mapping between JS tasks and native
// threads. That is expected to change in the future, hence the
// distinction between `self.tasks` and `self.threads`.
tasks: HashMap<task::TaskId, task::TaskHandle>,
threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>,
}
impl fmt::Debug for Starling {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Starling {{ .. }}")
}
}
impl Starling {
/// Construct a Starling system from the given options.
pub fn new(opts: Options) -> Result<Starling> {
let tasks = HashMap::new();
let threads = HashMap::new();
let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>());
let Options {
sync_io_pool_threads,
cpu_pool_threads,
..
} = opts;
let handle = StarlingHandle {
options: Arc::new(opts),
sync_io_pool: CpuPool::new(sync_io_pool_threads),
cpu_pool: CpuPool::new(cpu_pool_threads),
sender,
};
Ok(Starling {
handle,
receiver,
tasks,
threads,
})
}
/// Run the main Starling event loop with the specified options.
pub fn run(mut self) -> Result<()> {
let (main, thread) =
task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?;
self.tasks.insert(main.id(), main.clone());
self.threads.insert(thread.thread().id(), thread);
for msg in self.receiver.wait() {
let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?;
match msg {
StarlingMessage::TaskFinished(id) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the finished task");
join_handle
.join()
.expect("should join finished task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Ok(());
}
}
StarlingMessage::TaskErrored(id, error) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the errored task");
join_handle
.join()
.expect("should join errored task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Err(error);
}
}
StarlingMessage::NewTask(task, join_handle) => {
self.tasks.insert(task.id(), task);
self.threads.insert(join_handle.thread().id(), join_handle);
}
}
}
Ok(())
}
}
/// Messages that threads can send to the Starling supervisory thread.
///
/// This needs to be `pub` because it is used in a trait implementation; don't
/// actually use it!
#[derive(Debug)]
#[doc(hidden)]
pub enum StarlingMessage {
/// The task on the given thread completed successfully.
TaskFinished(task::TaskId),
/// The task on the given thread failed with the given error.
TaskErrored(task::TaskId, Error),
/// A new child task was created.
NewTask(task::TaskHandle, thread::JoinHandle<()>),
}
/// A handle to the Starling system.
///
/// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn
/// work in one of the utility thread pools, and communicate with the Starling
/// supervisory thread. Handles can be cloned and sent across threads,
/// propagating these capabilities.
#[derive(Clone)]
pub(crate) struct StarlingHandle {
options: Arc<Options>,
sync_io_pool: CpuPool,
cpu_pool: CpuPool,
sender: mpsc::Sender<StarlingMessage>,
}
impl fmt::Debug for StarlingHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "StarlingHandle {{ .. }}")
}
}
impl StarlingHandle {
/// Get the `Options` that this Starling system was configured with.
pub fn options(&self) -> &Arc<Options> {
&self.options
}
/// Get a handle to the thread pool for adapting synchronous IO (perhaps
/// from a library that wasn't written to be async) into the system.
pub fn sync_io_pool(&self) -> &CpuPool {
&self.sync_io_pool
}
/// Get a handle to the thread pool for performing CPU-bound native Rust
/// tasks.
pub fn cpu_pool(&self) -> &CpuPool {
&self.cpu_pool
}
/// Send a message to the Starling supervisory thread.
pub fn send(&self, msg: StarlingMessage) -> futures::sink::Send<mpsc::Sender<StarlingMessage>> {
self.sender.clone().send(msg)
}
}
#[cfg(test)]
mod tests {
use super::*;
use task::{TaskHandle, TaskMessage};
fn assert_clone<T: Clone>() {}
fn assert_send<T: Send>() {}
#[test]
fn error_is_send() {
assert_send::<Error>();
}
#[test]
fn options_is_send_clone() {
assert_clone::<Options>();
assert_send::<Options>();
}
#[test]
fn starling_handle_is_send_clone() {
assert_clone::<StarlingHandle>();
assert_send::<StarlingHandle>();
}
#[test]
fn task_handle_is_send_clone() {
assert_clone::<TaskHandle>();
assert_send::<TaskHandle>();
}
#[test]
fn starling_message_is_send() {
assert_send::<StarlingMessage>();
}
}

lib.rs
#[macro_use]
extern crate lazy_static;
extern crate num_cpus;
#[macro_use]
extern crate state_machine_future;
extern crate tokio_core;
extern crate tokio_timer;
extern crate void;
#[macro_use]
pub mod js_native;
mod error;
mod future_ext;
pub mod gc_roots;
pub(crate) mod js_global;
pub mod promise_future_glue;
pub(crate) mod promise_tracker;
pub(crate) mod task;
pub use error::*;
use futures::{Sink, Stream};
use futures::sync::mpsc;
use futures_cpupool::CpuPool;
use std::cmp;
use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::path;
use std::sync::Arc;
use std::thread;
/// Configuration options for building a Starling event loop.
///
/// ```
/// extern crate starling;
///
/// # fn foo() -> starling::Result<()> {
/// // Construct a new `Options` builder, providing the file containing
/// // the main JavaScript task.
/// starling::Options::new("path/to/main.js")
/// // Finish configuring the `Options` builder and run the event
/// // loop!
/// .run()?;
/// # Ok(())
/// # }
/// ```
#[derive(Clone, Debug)]
pub struct Options {
main: path::PathBuf,
sync_io_pool_threads: usize,
cpu_pool_threads: usize,
channel_buffer_size: usize,
}
const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8;
const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096;
impl Options {
/// Construct a new `Options` object for configuring the Starling event
/// loop.
///
/// The given `main` JavaScript file will be evaluated as the main task.
pub fn new<P>(main: P) -> Options
where
P: Into<path::PathBuf>,
{
Options {
main: main.into(),
sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS,
cpu_pool_threads: num_cpus::get(),
channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE,
}
}
/// Configure the number of threads to reserve for the synchronous IO pool.
///
/// The synchronous IO pool is a collection of threads for adapting
/// synchronous IO libraries into the (otherwise completely asynchronous)
/// Starling system.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn sync_io_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.sync_io_pool_threads = threads;
self
}
/// Configure the number of threads to reserve for the CPU pool.
///
/// The CPU pool is a collection of worker threads for CPU-bound native Rust
/// tasks.
///
/// Defaults to the number of logical CPUs on the machine.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn cpu_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.cpu_pool_threads = threads;
self
}
/// Configure the size of mpsc buffers in the system.
///
/// ### Panics
///
/// Panics if `size` is 0.
pub fn channel_buffer_size(mut self, size: usize) -> Self {
assert!(size > 0);
self.channel_buffer_size = size;
self
}
/// Finish this `Options` builder and run the Starling event loop with its
/// specified configuration.
pub fn run(self) -> Result<()> {
Starling::new(self)?.run()
}
}
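// A fuller builder sketch (values are arbitrary): every setter consumes and
// returns `self`, so the knobs chain naturally before `run()`:
//
//     starling::Options::new("path/to/main.js")
//         .sync_io_pool_threads(4)
//         .cpu_pool_threads(2)
//         .channel_buffer_size(8192)
//         .run()?;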
impl Options {
// Get the number of `T`s that should be buffered in an mpsc channel for the
// current configuration.
fn buffer_capacity_for<T>(&self) -> usize {
let size_of_t = cmp::max(1, mem::size_of::<T>());
let capacity = self.channel_buffer_size / size_of_t;
cmp::max(1, capacity)
}
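// Worked example: with the default `channel_buffer_size` of 4096 bytes, a
// 64-byte message type gets 4096 / 64 = 64 buffered slots, while a
// zero-sized type is clamped to size 1 and gets 4096 slots; the outer
// `max` guarantees at least one slot for very large types.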
}
/// The Starling supervisory thread.
///
/// The supervisory thread doesn't do much other than supervise other threads: the IO
/// event loop thread, various utility thread pools, and JavaScript task
/// threads. Its primary responsibility is ensuring clean system shutdown and
/// joining thread handles.
pub(crate) struct Starling {
handle: StarlingHandle,
receiver: mpsc::Receiver<StarlingMessage>,
// Currently there is a 1:1 mapping between JS tasks and native
// threads. That is expected to change in the future, hence the
// distinction between `self.tasks` and `self.threads`.
tasks: HashMap<task::TaskId, task::TaskHandle>,
threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>,
}
impl fmt::Debug for Starling {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Starling {{ .. }}")
}
}
impl Starling {
/// Construct a Starling system from the given options.
pub fn new(opts: Options) -> Result<Starling> {
let tasks = HashMap::new();
let threads = HashMap::new();
let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>());
let Options {
sync_io_pool_threads,
cpu_pool_threads,
..
} = opts;
let handle = StarlingHandle { | cpu_pool: CpuPool::new(cpu_pool_threads),
sender,
};
Ok(Starling {
handle,
receiver,
tasks,
threads,
})
}
/// Run the main Starling event loop with the specified options.
pub fn run(mut self) -> Result<()> {
let (main, thread) =
task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?;
self.tasks.insert(main.id(), main.clone());
self.threads.insert(thread.thread().id(), thread);
for msg in self.receiver.wait() {
let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?;
match msg {
StarlingMessage::TaskFinished(id) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the finished task");
join_handle
.join()
.expect("should join finished task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Ok(());
}
}
StarlingMessage::TaskErrored(id, error) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the errored task");
join_handle
.join()
.expect("should join errored task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Err(error);
}
}
StarlingMessage::NewTask(task, join_handle) => {
self.tasks.insert(task.id(), task);
self.threads.insert(join_handle.thread().id(), join_handle);
}
}
}
Ok(())
}
}
/// Messages that threads can send to the Starling supervisory thread.
///
/// This needs to be `pub` because it is used in a trait implementation; don't
/// actually use it!
#[derive(Debug)]
#[doc(hidden)]
pub enum StarlingMessage {
/// The task on the given thread completed successfully.
TaskFinished(task::TaskId),
/// The task on the given thread failed with the given error.
TaskErrored(task::TaskId, Error),
/// A new child task was created.
NewTask(task::TaskHandle, thread::JoinHandle<()>),
}
/// A handle to the Starling system.
///
/// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn
/// work in one of the utility thread pools, and communicate with the Starling
/// supervisory thread. Handles can be cloned and sent across threads,
/// propagating these capabilities.
#[derive(Clone)]
pub(crate) struct StarlingHandle {
options: Arc<Options>,
sync_io_pool: CpuPool,
cpu_pool: CpuPool,
sender: mpsc::Sender<StarlingMessage>,
}
impl fmt::Debug for StarlingHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "StarlingHandle {{ .. }}")
}
}
impl StarlingHandle {
/// Get the `Options` that this Starling system was configured with.
pub fn options(&self) -> &Arc<Options> {
&self.options
}
/// Get a handle to the thread pool for adapting synchronous IO (perhaps
/// from a library that wasn't written to be async) into the system.
pub fn sync_io_pool(&self) -> &CpuPool {
&self.sync_io_pool
}
/// Get a handle to the thread pool for performing CPU-bound native Rust
/// tasks.
pub fn cpu_pool(&self) -> &CpuPool {
&self.cpu_pool
}
/// Send a message | options: Arc::new(opts),
sync_io_pool: CpuPool::new(sync_io_pool_threads), | random_line_split |
lib.rs | _use]
extern crate lazy_static;
extern crate num_cpus;
#[macro_use]
extern crate state_machine_future;
extern crate tokio_core;
extern crate tokio_timer;
extern crate void;
#[macro_use]
pub mod js_native;
mod error;
mod future_ext;
pub mod gc_roots;
pub(crate) mod js_global;
pub mod promise_future_glue;
pub(crate) mod promise_tracker;
pub(crate) mod task;
pub use error::*;
use futures::{Sink, Stream};
use futures::sync::mpsc;
use futures_cpupool::CpuPool;
use std::cmp;
use std::collections::HashMap;
use std::fmt;
use std::mem;
use std::path;
use std::sync::Arc;
use std::thread;
/// Configuration options for building a Starling event loop.
///
/// ```
/// extern crate starling;
///
/// # fn foo() -> starling::Result<()> {
/// // Construct a new `Options` builder, providing the file containing
/// // the main JavaScript task.
/// starling::Options::new("path/to/main.js")
/// // Finish configuring the `Options` builder and run the event
/// // loop!
/// .run()?;
/// # Ok(())
/// # }
/// ```
#[derive(Clone, Debug)]
pub struct Options {
main: path::PathBuf,
sync_io_pool_threads: usize,
cpu_pool_threads: usize,
channel_buffer_size: usize,
}
const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8;
const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096;
impl Options {
/// Construct a new `Options` object for configuring the Starling event
/// loop.
///
/// The given `main` JavaScript file will be evaluated as the main task.
pub fn new<P>(main: P) -> Options
where
P: Into<path::PathBuf>,
{
Options {
main: main.into(),
sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS,
cpu_pool_threads: num_cpus::get(),
channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE,
}
}
/// Configure the number of threads to reserve for the synchronous IO pool.
///
/// The synchronous IO pool is a collection of threads for adapting
/// synchronous IO libraries into the (otherwise completely asynchronous)
/// Starling system.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn sync_io_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.sync_io_pool_threads = threads;
self
}
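// Sketch of the intended adaptation (hypothetical call site, using the
// futures-cpupool API this crate already imports): blocking IO is wrapped in
// `spawn_fn`, which yields a future the event loop can poll:
//
//     let bytes = handle.sync_io_pool().spawn_fn(|| {
//         std::fs::read("some/file") // synchronous IO, adapted to a future
//     });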
/// Configure the number of threads to reserve for the CPU pool.
///
/// The CPU pool is a collection of worker threads for CPU-bound native Rust
/// tasks.
///
/// Defaults to the number of logical CPUs on the machine.
///
/// ### Panics
///
/// Panics if `threads` is 0.
pub fn cpu_pool_threads(mut self, threads: usize) -> Self {
assert!(threads > 0);
self.cpu_pool_threads = threads;
self
}
/// Configure the size of mpsc buffers in the system.
///
/// ### Panics
///
/// Panics if `size` is 0.
pub fn channel_buffer_size(mut self, size: usize) -> Self {
assert!(size > 0);
self.channel_buffer_size = size;
self
}
/// Finish this `Options` builder and run the Starling event loop with its
/// specified configuration.
pub fn run(self) -> Result<()> {
Starling::new(self)?.run()
}
}
impl Options {
// Get the number of `T`s that should be buffered in an mpsc channel for the
// current configuration.
fn | <T>(&self) -> usize {
let size_of_t = cmp::max(1, mem::size_of::<T>());
let capacity = self.channel_buffer_size / size_of_t;
cmp::max(1, capacity)
}
}
/// The Starling supervisory thread.
///
/// The supervisory thread doesn't do much other than supervise other threads: the IO
/// event loop thread, various utility thread pools, and JavaScript task
/// threads. Its primary responsibility is ensuring clean system shutdown and
/// joining thread handles.
pub(crate) struct Starling {
handle: StarlingHandle,
receiver: mpsc::Receiver<StarlingMessage>,
// Currently there is a 1:1 mapping between JS tasks and native
// threads. That is expected to change in the future, hence the
// distinction between `self.tasks` and `self.threads`.
tasks: HashMap<task::TaskId, task::TaskHandle>,
threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>,
}
impl fmt::Debug for Starling {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Starling {{ .. }}")
}
}
impl Starling {
/// Construct a Starling system from the given options.
pub fn new(opts: Options) -> Result<Starling> {
let tasks = HashMap::new();
let threads = HashMap::new();
let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>());
let Options {
sync_io_pool_threads,
cpu_pool_threads,
..
} = opts;
let handle = StarlingHandle {
options: Arc::new(opts),
sync_io_pool: CpuPool::new(sync_io_pool_threads),
cpu_pool: CpuPool::new(cpu_pool_threads),
sender,
};
Ok(Starling {
handle,
receiver,
tasks,
threads,
})
}
/// Run the main Starling event loop with the specified options.
pub fn run(mut self) -> Result<()> {
let (main, thread) =
task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?;
self.tasks.insert(main.id(), main.clone());
self.threads.insert(thread.thread().id(), thread);
for msg in self.receiver.wait() {
let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?;
match msg {
StarlingMessage::TaskFinished(id) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the finished task");
join_handle
.join()
.expect("should join finished task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Ok(());
}
}
StarlingMessage::TaskErrored(id, error) => {
assert!(self.tasks.remove(&id).is_some());
let thread_id = id.into();
let join_handle = self.threads
.remove(&thread_id)
.expect("should have a thread join handle for the errored task");
join_handle
.join()
.expect("should join errored task's thread OK");
if id == main.id() {
// TODO: notification of shutdown and joining other threads and things.
return Err(error);
}
}
StarlingMessage::NewTask(task, join_handle) => {
self.tasks.insert(task.id(), task);
self.threads.insert(join_handle.thread().id(), join_handle);
}
}
}
Ok(())
}
}
/// Messages that threads can send to the Starling supervisory thread.
///
/// This needs to be `pub` because it is used in a trait implementation; don't
/// actually use it!
#[derive(Debug)]
#[doc(hidden)]
pub enum StarlingMessage {
/// The task on the given thread completed successfully.
TaskFinished(task::TaskId),
/// The task on the given thread failed with the given error.
TaskErrored(task::TaskId, Error),
/// A new child task was created.
NewTask(task::TaskHandle, thread::JoinHandle<()>),
}
/// A handle to the Starling system.
///
/// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn
/// work in one of the utility thread pools, and communicate with the Starling
/// supervisory thread. Handles can be cloned and sent across threads,
/// propagating these capabilities.
#[derive(Clone)]
pub(crate) struct StarlingHandle {
options: Arc<Options>,
sync_io_pool: CpuPool,
cpu_pool: CpuPool,
sender: mpsc::Sender<StarlingMessage>,
}
impl fmt::Debug for StarlingHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "StarlingHandle {{ .. }}")
}
}
impl StarlingHandle {
/// Get the `Options` that this Starling system was configured with.
pub fn options(&self) -> &Arc<Options> {
&self.options
}
/// Get a handle to the thread pool for adapting synchronous IO (perhaps
/// from a library that wasn't written to be async) into the system.
pub fn sync_io_pool(&self) -> &CpuPool {
&self.sync_io_pool
}
/// Get a handle to the thread pool for performing CPU-bound native Rust
/// tasks.
pub fn cpu_pool(&self) -> &CpuPool {
&self.cpu_pool
}
/// Send a | buffer_capacity_for | identifier_name |
random_state.rs | fn with_fixed_keys() -> RandomState {
let [k0, k1, k2, k3] = get_fixed_seeds()[0];
RandomState { k0, k1, k2, k3 }
}
/// Build a `RandomState` from a single key. The provided key does not need to be of high quality,
/// but all `RandomState`s created from the same key will produce identical hashers.
/// (In contrast to `generate_with` above)
///
/// This allows for explicitly setting the seed to be used.
///
/// Note: This method does not require the provided seed to be strong.
#[inline]
pub fn with_seed(key: usize) -> RandomState {
let fixed = get_fixed_seeds();
RandomState::from_keys(&fixed[0], &fixed[1], key)
}
/// Allows for explicitly setting the seeds to be used.
/// All `RandomState`s created with the same set of keys will produce identical hashers.
/// (In contrast to `generate_with` above)
///
/// Note: If DOS resistance is desired, one of these should be a decent-quality random number.
/// If 4 high-quality random numbers are not cheaply available, this method is robust against 0s being passed for
/// one or more of the parameters or the same value being passed for more than one parameter.
/// It is recommended to pass numbers in order from highest to lowest quality (if there is any difference).
#[inline]
pub const fn with_seeds(k0: u64, k1: u64, k2: u64, k3: u64) -> RandomState {
RandomState {
k0: k0 ^ PI2[0],
k1: k1 ^ PI2[1],
k2: k2 ^ PI2[2],
k3: k3 ^ PI2[3],
}
}
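// Because `with_seeds` is `const`, a seeded state can live in a `const` or
// `static`, giving fully deterministic hashing across runs (sketch; the name
// FIXED is illustrative):
//
//     const FIXED: RandomState = RandomState::with_seeds(1, 2, 3, 4);
//     // FIXED.hash_one("x") yields the same value on every run.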
/// Calculates the hash of a single value. This provides a more convenient (and faster) way to obtain a hash:
/// For example:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;
let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
)]
/// This is similar to:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
)]
/// (Note that these two ways to get a hash may not produce the same value for the same data)
///
/// This is intended as a convenience for code which *consumes* hashes, such
/// as the implementation of a hash table or in unit tests that check
/// whether a custom [`Hash`] implementation behaves as expected.
///
/// This must not be used in any code which *creates* hashes, such as in an
/// implementation of [`Hash`]. The way to create a combined hash of
/// multiple values is to call [`Hash::hash`] multiple times using the same
/// [`Hasher`], not to call this method repeatedly and combine the results.
#[inline]
pub fn hash_one<T: Hash>(&self, x: T) -> u64
where
Self: Sized,
{
use crate::specialize::CallHasher;
T::get_hash(&x, self)
}
}
/// Creates an instance of RandomState using keys obtained from the random number generator.
/// Each instance created in this way will have a unique set of keys. (But the resulting instance
/// can be used to create many hashers, each of which will have the same keys.)
///
/// This is the same as [RandomState::new()]
///
/// NOTE: For safety this trait impl is only available if either of the flags `runtime-rng` (on by default) or
/// `compile-time-rng` are enabled. This is to prevent weakly keyed maps from being accidentally created. Instead,
/// one of the constructors for [RandomState] must be used.
#[cfg(any(feature = "compile-time-rng", feature = "runtime-rng", feature = "no-rng"))]
impl Default for RandomState {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl BuildHasher for RandomState {
type Hasher = AHasher;
/// Constructs a new [AHasher] with keys based on this [RandomState] object.
/// This means that two different [RandomState]s will generate
/// [AHasher]s that will return different hashcodes, but [Hasher]s created from the same [BuildHasher]
/// will generate the same hashes for the same input data.
///
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use ahash::{AHasher, RandomState};
use std::hash::{Hasher, BuildHasher};
let build_hasher = RandomState::new();
let mut hasher_1 = build_hasher.build_hasher();
let mut hasher_2 = build_hasher.build_hasher();
hasher_1.write_u32(1234);
hasher_2.write_u32(1234);
assert_eq!(hasher_1.finish(), hasher_2.finish());
let other_build_hasher = RandomState::new();
let mut different_hasher = other_build_hasher.build_hasher();
different_hasher.write_u32(1234);
assert_ne!(different_hasher.finish(), hasher_1.finish());
```
"##
)]
/// [Hasher]: std::hash::Hasher
/// [BuildHasher]: std::hash::BuildHasher
/// [HashMap]: std::collections::HashMap
#[inline]
fn build_hasher(&self) -> AHasher {
AHasher::from_random_state(self)
}
/// Calculates the hash of a single value. This provides a more convenient (and faster) way to obtain a hash:
/// For example:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;
let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
)]
/// This is similar to:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
)]
/// (Note that these two ways to get a hash may not produce the same value for the same data)
///
/// This is intended as a convenience for code which *consumes* hashes, such
/// as the implementation of a hash table or in unit tests that check
/// whether a custom [`Hash`] implementation behaves as expected.
///
/// This must not be used in any code which *creates* hashes, such as in an
/// implementation of [`Hash`]. The way to create a combined hash of
/// multiple values is to call [`Hash::hash`] multiple times using the same
/// [`Hasher`], not to call this method repeatedly and combine the results.
#[cfg(feature = "specialize")]
#[inline]
fn hash_one<T: Hash>(&self, x: T) -> u64 {
RandomState::hash_one(self, x)
}
}
#[cfg(feature = "specialize")]
impl BuildHasherExt for RandomState {
#[inline]
fn hash_as_u64<T: Hash + ?Sized>(&self, value: &T) -> u64 {
let mut hasher = AHasherU64 {
buffer: self.k0,
pad: self.k1,
};
value.hash(&mut hasher);
hasher.finish()
}
#[inline]
fn hash_as_fixed_length<T: Hash + ?Sized>(&self, value: &T) -> u64 {
let mut hasher = AHasherFixed(self.build_hasher());
value.hash(&mut hasher);
hasher.finish()
}
#[inline]
fn hash_as_str<T: Hash + ?Sized>(&self, value: &T) -> u64 {
let mut hasher = AHasherStr(self.build_hasher());
value.hash(&mut hasher);
hasher.finish()
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_unique() { | let a = RandomState::generate_with(1, 2, 3, 4);
let b = RandomState::generate_with(1, 2, 3, 4);
assert_ne!(a.build_hasher().finish(), b.build_hasher().finish());
}
| random_line_split |
|
random_state.rs | the same key will produce identical hashers.
/// (In contrast to `generate_with` above)
///
/// This allows for explicitly setting the seed to be used.
///
/// Note: This method does not require the provided seed to be strong.
#[inline]
pub fn with_seed(key: usize) -> RandomState {
let fixed = get_fixed_seeds();
RandomState::from_keys(&fixed[0], &fixed[1], key)
}
/// Allows for explicitly setting the seeds to be used.
/// All `RandomState`s created with the same set of keys will produce identical hashers.
/// (In contrast to `generate_with` above)
///
/// Note: If DOS resistance is desired, one of these should be a decent-quality random number.
/// If 4 high-quality random numbers are not cheaply available, this method is robust against 0s being passed for
/// one or more of the parameters or the same value being passed for more than one parameter.
/// It is recommended to pass numbers in order from highest to lowest quality (if there is any difference).
#[inline]
pub const fn with_seeds(k0: u64, k1: u64, k2: u64, k3: u64) -> RandomState {
RandomState {
k0: k0 ^ PI2[0],
k1: k1 ^ PI2[1],
k2: k2 ^ PI2[2],
k3: k3 ^ PI2[3],
}
}
/// Calculates the hash of a single value. This provides a more convenient (and faster) way to obtain a hash:
/// For example:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;
let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
)]
/// This is similar to:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
)]
/// (Note that these two ways to get a hash may not produce the same value for the same data)
///
/// This is intended as a convenience for code which *consumes* hashes, such
/// as the implementation of a hash table or in unit tests that check
/// whether a custom [`Hash`] implementation behaves as expected.
///
/// This must not be used in any code which *creates* hashes, such as in an
/// implementation of [`Hash`]. The way to create a combined hash of
/// multiple values is to call [`Hash::hash`] multiple times using the same
/// [`Hasher`], not to call this method repeatedly and combine the results.
#[inline]
pub fn hash_one<T: Hash>(&self, x: T) -> u64
where
Self: Sized,
{
use crate::specialize::CallHasher;
T::get_hash(&x, self)
}
}
/// Creates an instance of RandomState using keys obtained from the random number generator.
/// Each instance created in this way will have a unique set of keys. (But the resulting instance
/// can be used to create many hashers, each of which will have the same keys.)
///
/// This is the same as [RandomState::new()]
///
/// NOTE: For safety this trait impl is only available if either of the flags `runtime-rng` (on by default) or
/// `compile-time-rng` are enabled. This is to prevent weakly keyed maps from being accidentally created. Instead,
/// one of the constructors for [RandomState] must be used.
#[cfg(any(feature = "compile-time-rng", feature = "runtime-rng", feature = "no-rng"))]
impl Default for RandomState {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl BuildHasher for RandomState {
type Hasher = AHasher;
/// Constructs a new [AHasher] with keys based on this [RandomState] object.
/// This means that two different [RandomState]s will generate
/// [AHasher]s that will return different hashcodes, but [Hasher]s created from the same [BuildHasher]
/// will generate the same hashes for the same input data.
///
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use ahash::{AHasher, RandomState};
use std::hash::{Hasher, BuildHasher};
let build_hasher = RandomState::new();
let mut hasher_1 = build_hasher.build_hasher();
let mut hasher_2 = build_hasher.build_hasher();
hasher_1.write_u32(1234);
hasher_2.write_u32(1234);
assert_eq!(hasher_1.finish(), hasher_2.finish());
let other_build_hasher = RandomState::new();
let mut different_hasher = other_build_hasher.build_hasher();
different_hasher.write_u32(1234);
assert_ne!(different_hasher.finish(), hasher_1.finish());
```
"##
)]
/// [Hasher]: std::hash::Hasher
/// [BuildHasher]: std::hash::BuildHasher
/// [HashMap]: std::collections::HashMap
#[inline]
fn build_hasher(&self) -> AHasher {
AHasher::from_random_state(self)
}
/// Calculates the hash of a single value. This provides a more convenient (and faster) way to obtain a hash:
/// For example:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;
let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
)]
/// This is similar to:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
)]
/// (Note that these two ways to get a hash may not produce the same value for the same data)
///
/// This is intended as a convenience for code which *consumes* hashes, such
/// as the implementation of a hash table or in unit tests that check
/// whether a custom [`Hash`] implementation behaves as expected.
///
/// This must not be used in any code which *creates* hashes, such as in an
/// implementation of [`Hash`]. The way to create a combined hash of
/// multiple values is to call [`Hash::hash`] multiple times using the same
/// [`Hasher`], not to call this method repeatedly and combine the results.
#[cfg(feature = "specialize")]
#[inline]
fn hash_one<T: Hash>(&self, x: T) -> u64 {
RandomState::hash_one(self, x)
}
}
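// Usage sketch with std collections: any `BuildHasher` plugs into `HashMap`
// via `with_hasher` (types spelled out for clarity; values are arbitrary):
//
//     use std::collections::HashMap;
//     let mut map: HashMap<&str, u32, RandomState> =
//         HashMap::with_hasher(RandomState::with_seed(42));
//     map.insert("answer", 42);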
#[cfg(feature = "specialize")]
impl BuildHasherExt for RandomState {
#[inline]
fn hash_as_u64<T: Hash + ?Sized>(&self, value: &T) -> u64 {
let mut hasher = AHasherU64 {
buffer: self.k0,
pad: self.k1,
};
value.hash(&mut hasher);
hasher.finish()
}
#[inline]
fn hash_as_fixed_length<T: Hash + ?Sized>(&self, value: &T) -> u64 {
let mut hasher = AHasherFixed(self.build_hasher());
value.hash(&mut hasher);
hasher.finish()
}
#[inline]
fn hash_as_str<T: Hash + ?Sized>(&self, value: &T) -> u64 {
let mut hasher = AHasherStr(self.build_hasher());
value.hash(&mut hasher);
hasher.finish()
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_unique() {
let a = RandomState::generate_with(1, 2, 3, 4);
let b = RandomState::generate_with(1, 2, 3, 4);
assert_ne!(a.build_hasher().finish(), b.build_hasher().finish());
}
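// Added illustration (not in the original suite): per the `with_seed` docs,
// identical keys must yield identical hashers.
#[test]
fn test_with_seed_deterministic() {
let a = RandomState::with_seed(42);
let b = RandomState::with_seed(42);
assert_eq!(a.build_hasher().finish(), b.build_hasher().finish());
}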
#[cfg(all(feature = "runtime-rng", not(all(feature = "compile-time-rng", test))))]
#[test]
fn test_not_pi() {
assert_ne!(PI, get_fixed_seeds()[0]);
}
#[cfg(all(feature = "compile-time-rng", any(not(feature = "runtime-rng"), test)))]
#[test]
fn | test_not_pi_const | identifier_name |
|
random_state.rs | u64, k1: u64, k2: u64, k3: u64) -> RandomState {
let src = get_src();
let fixed = get_fixed_seeds();
RandomState::from_keys(&fixed[0], &[k0, k1, k2, k3], src.gen_hasher_seed())
}
fn from_keys(a: &[u64; 4], b: &[u64; 4], c: usize) -> RandomState {
let &[k0, k1, k2, k3] = a;
let mut hasher = AHasher::from_random_state(&RandomState { k0, k1, k2, k3 });
hasher.write_usize(c);
let mix = |l: u64, r: u64| {
let mut h = hasher.clone();
h.write_u64(l);
h.write_u64(r);
h.finish()
};
RandomState {
k0: mix(b[0], b[2]),
k1: mix(b[1], b[3]),
k2: mix(b[2], b[1]),
k3: mix(b[3], b[0]),
}
}
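// Note on the mixing above: each derived key hashes two of the caller's four
// keys through a hasher seeded by the fixed keys and `c`, so no input key is
// ever used verbatim; each `b[i]` feeds exactly two of the four outputs,
// while `a` and `c` influence all of them.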
/// Internal. Used by Default.
#[inline]
pub(crate) fn with_fixed_keys() -> RandomState {
let [k0, k1, k2, k3] = get_fixed_seeds()[0];
RandomState { k0, k1, k2, k3 }
}
/// Build a `RandomState` from a single key. The provided key does not need to be of high quality,
/// but all `RandomState`s created from the same key will produce identical hashers.
/// (In contrast to `generate_with` above)
///
/// This allows for explicitly setting the seed to be used.
///
/// Note: This method does not require the provided seed to be strong.
#[inline]
pub fn with_seed(key: usize) -> RandomState {
let fixed = get_fixed_seeds();
RandomState::from_keys(&fixed[0], &fixed[1], key)
}
/// Allows for explicitly setting the seeds to be used.
/// All `RandomState`s created with the same set of keys will produce identical hashers.
/// (In contrast to `generate_with` above)
///
/// Note: If DOS resistance is desired, one of these should be a decent-quality random number.
/// If 4 high-quality random numbers are not cheaply available, this method is robust against 0s being passed for
/// one or more of the parameters or the same value being passed for more than one parameter.
/// It is recommended to pass numbers in order from highest to lowest quality (if there is any difference).
#[inline]
pub const fn with_seeds(k0: u64, k1: u64, k2: u64, k3: u64) -> RandomState {
RandomState {
k0: k0 ^ PI2[0],
k1: k1 ^ PI2[1],
k2: k2 ^ PI2[2],
k3: k3 ^ PI2[3],
}
}
/// Calculates the hash of a single value. This provides a more convenient (and faster) way to obtain a hash:
/// For example:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;
let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
)]
/// This is similar to:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
)]
/// (Note that these two ways to get a hash may not produce the same value for the same data)
///
/// This is intended as a convenience for code which *consumes* hashes, such
/// as the implementation of a hash table or in unit tests that check
/// whether a custom [`Hash`] implementation behaves as expected.
///
/// This must not be used in any code which *creates* hashes, such as in an
/// implementation of [`Hash`]. The way to create a combined hash of
/// multiple values is to call [`Hash::hash`] multiple times using the same
/// [`Hasher`], not to call this method repeatedly and combine the results.
#[inline]
pub fn hash_one<T: Hash>(&self, x: T) -> u64
where
Self: Sized,
{
use crate::specialize::CallHasher;
T::get_hash(&x, self)
}
}
/// Creates an instance of RandomState using keys obtained from the random number generator.
/// Each instance created in this way will have a unique set of keys. (But the resulting instance
/// can be used to create many hashers, each of which will have the same keys.)
///
/// This is the same as [RandomState::new()]
///
/// NOTE: For safety this trait impl is only available if either of the flags `runtime-rng` (on by default) or
/// `compile-time-rng` are enabled. This is to prevent weakly keyed maps from being accidentally created. Instead,
/// one of the constructors for [RandomState] must be used.
#[cfg(any(feature = "compile-time-rng", feature = "runtime-rng", feature = "no-rng"))]
impl Default for RandomState {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl BuildHasher for RandomState {
type Hasher = AHasher;
/// Constructs a new [AHasher] with keys based on this [RandomState] object.
/// This means that two different [RandomState]s will generate
/// [AHasher]s that will return different hashcodes, but [Hasher]s created from the same [BuildHasher]
/// will generate the same hashes for the same input data.
///
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use ahash::{AHasher, RandomState};
use std::hash::{Hasher, BuildHasher};
let build_hasher = RandomState::new();
let mut hasher_1 = build_hasher.build_hasher();
let mut hasher_2 = build_hasher.build_hasher();
hasher_1.write_u32(1234);
hasher_2.write_u32(1234);
assert_eq!(hasher_1.finish(), hasher_2.finish());
let other_build_hasher = RandomState::new();
let mut different_hasher = other_build_hasher.build_hasher();
different_hasher.write_u32(1234);
assert_ne!(different_hasher.finish(), hasher_1.finish());
```
"##
)]
/// [Hasher]: std::hash::Hasher
/// [BuildHasher]: std::hash::BuildHasher
/// [HashMap]: std::collections::HashMap
#[inline]
fn build_hasher(&self) -> AHasher {
AHasher::from_random_state(self)
}
/// Calculates the hash of a single value. This provides a more convenient (and faster) way to obtain a hash:
/// For example:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;
let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
)]
/// This is similar to:
#[cfg_attr(
feature = "std",
doc = r##" # Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
)]
/// (Note that these two ways to get a hash may not produce the same value for the same data)
///
/// This is intended as a convenience for code which *consumes* hashes, such
/// as the implementation of a hash table or in unit tests that check
/// whether a custom [`Hash`] implementation behaves as expected.
///
/// This must not be used in any code which *creates* hashes, such as in an
/// implementation of [`Hash`]. The way to create a combined hash of
/// multiple values is to call [`Hash::hash`] multiple times using the same
/// [`Hasher`], not to call this method repeatedly and combine the results.
#[cfg(feature = "specialize")]
#[inline]
fn hash_one<T: Hash>(&self, x: T) -> u64 | {
RandomState::hash_one(self, x)
} | identifier_body |
|
Pretty.ts | .sessionId, PASS);
this._log.push('✓ ' + test.id);
}
}
@eventHandler()
tunnelDownloadProgress(message: TunnelMessage) {
const progress = message.progress;
this.tunnelState = 'Downloading ' + (progress.received / progress.total * 100).toFixed(2) + '%';
}
@eventHandler()
tunnelStatus(message: TunnelMessage) {
this.tunnelState = message.status;
}
@eventHandler()
error(error: Error) {
const message = '! ' + error.message;
this._log.push(message + '\n' + this.formatter.format(error));
// stop the render timeout on a fatal error so Intern can exit
clearTimeout(this._renderTimeout);
}
@eventHandler()
deprecated(message: DeprecationMessage) {
let text = '⚠ ' + message.original + ' is deprecated.';
if (message.replacement) {
text += ' Use ' + message.replacement + ' instead.';
}
if (message.message) {
text += ' ' + message.message;
}
this._log.push(text);
}
/**
* Return the reporter for a given session, creating it if necessary.
*/
private _getReporter(suite: Suite): Report {
if (!this._reports[suite.sessionId]) {
this._reports[suite.sessionId] = new Report(suite.remote && suite.remote.environmentType.toString());
}
return this._reports[suite.sessionId];
}
/**
* Create the charm instance used by this reporter.
*/
private _newCharm(): charm.Charm {
const c = charm();
c.pipe(process.stdout);
return c;
}
private _record(sessionId: string, result: number) {
const reporter = this._reports[sessionId];
reporter && reporter.record(result);
this._total.record(result);
}
/**
* Render the progress bar
* [✔︎~✔︎×✔︎✔︎✔︎✔︎✔︎✔︎] 99/100
* @param report the report data to render
* @param width the maximum width for the entire progress bar
*/
private _drawProgressBar(report: Report, width: number) {
const spinnerCharacter = SPINNER_STATES[this._spinnerOffset];
const charm = this._charm;
if (!report.numTotal) {
charm.write('Pending');
return;
}
const totalTextSize = String(report.numTotal).length;
const remainingWidth = Math.max(width - 4 - (totalTextSize * 2), 1);
const barSize = Math.min(remainingWidth, report.numTotal, this.maxProgressBarWidth);
const results = report.getCompressedResults(barSize);
charm.write('[' + results.map(value => this._getColor(value)).join(''));
charm.display('reset').write(fit(spinnerCharacter, barSize - results.length) + '] ' +
fit(report.finished, totalTextSize, true) + '/' + report.numTotal);
}
/**
* Render a single line
* TITLE: [✔︎~✔︎×✔︎✔︎✔︎✔︎✔︎✔︎] 100/100, 2 fail, 1 skip
* TODO split this into two lines. The first line will display the
* title, OS, and code coverage; the second will show the progress bar
*/
private _drawSessionReport(report: Report) {
const charm = this._charm;
const titleWidth = this.titleWidth;
const leftOfBar = fit(this._abbreviateEnvironment(report.environment).slice(0, titleWidth - 2) + ': ',
titleWidth);
const rightOfBar = '' +
(report.numFailed ? ', ' + report.numFailed + ' fail' : '') +
(report.numSkipped ? ', ' + report.numSkipped + ' skip' : '');
const barWidth = this.dimensions.width - rightOfBar.length - titleWidth;
charm.write(leftOfBar);
this._drawProgressBar(report, barWidth);
charm.write(rightOfBar + '\n');
}
/**
* Abbreviate the environment information for rendering
* @param env the test environment
* @returns {string} abbreviated environment information
*/
private _abbreviateEnvironment(env: any): string {
const browser = (<{ [key: string]: any }>BROWSERS)[env.browserName.toLowerCase()] || env.browserName.slice(0, 4);
const result = [browser];
if (env.version) {
let version = String(env.version);
if (version.indexOf('.') > -1) {
version = version.slice(0, version.indexOf('.'));
}
result.push(version);
}
if (env.platform) {
result.push(env.platform.slice(0, 3));
}
return result.join(' ');
}
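// Worked example (hypothetical env): { browserName: 'chrome', version:
// '63.0.3', platform: 'windows' } abbreviates to 'Chr 63 win' — the BROWSERS
// map supplies 'Chr', the version keeps only its major part, and the
// platform is cut to three characters.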
private _render(omitLogs: boolean = false) {
const charm = this._charm;
const numReporters = Object.keys(this._reports).length;
const logLength = this.dimensions.height - numReporters - 4 /* last line & total */ -
(this.tunnelState ? 2 : 0) - (numReporters ? 1 : 0) - (this._header ? 1 : 0);
this._spinnerOffset = (++this._spinnerOffset) % SPINNER_STATES.length;
charm.display('reset');
if (this._header) {
charm.display('bright');
charm.write(this._header + '\n');
charm.display('reset');
}
this.tunnelState && charm.write('Tunnel: ' + this.tunnelState + '\n\n');
this._drawTotalReporter(this._total);
// TODO: if there is not room to render all reporters, only render
// active ones, or only the total with less space
if (numReporters) {
charm.write('\n');
for (let key in this._reports) {
this._drawSessionReport(this._reports[key]);
}
}
if (!omitLogs && logLength > 0 && this._log.length) {
const allowed = { '×': true, '⚠': true, '!': true };
const logs = this._log.filter(line => {
return (<{ [key: string]: any }>allowed)[line.charAt(0)];
}).slice(-logLength).map(line => {
// truncate long lines
const color = this._getColor(line);
line = line.split('\n', 1)[0];
return color + line.slice(0, this.dimensions.width) + ANSI_COLOR.reset;
}).join('\n');
charm.write('\n');
charm.write(logs);
}
}
private _drawTotalReporter(report: Report) {
const charm = this._charm;
const title = 'Total: ';
const totalTextSize = String(report.numTotal).length;
charm.write(title);
this._drawProgressBar(report, this.dimensions.width - title.length);
charm.write(format('\nPassed: %s Failed: %s Skipped: %d\n',
fit(report.numPassed, totalTextSize), fit(report.numFailed, totalTextSize), report.numSkipped));
}
private _getColor(value: string | number): string {
if (typeof value === 'string') {
value = value[0];
}
return this.colorReplacement[value] || ANSI_COLOR.reset;
}
}
export interface PrettyProperties extends ReporterProperties {
colorReplacement: { [key: string]: string };
dimensions: any;
maxProgressBarWidth: number;
titleWidth: number;
watermarks: Watermarks;
}
export type PrettyOptions = Partial<PrettyProperties>;
/**
* Model for tracking test results
* @param environment the environment associated with the report
* @param sessionId the sessionId associated with the report
*/
export class Report {
environment: string;
sessionId: string;
numTotal = 0;
numPassed = 0;
numFailed = 0;
numSkipped = 0;
results: number[] = [];
coverage: Collector = new Collector();
constructor(environment?: string, sessionId?: string) {
this.environment = environment;
this.sessionId = sessionId;
}
get finished() {
return this.results.length;
}
record(result: number) {
this.results.push(result);
switch (result) {
case PASS:
++this.numPassed;
break;
case SKIP:
++this.numSkipped;
break;
case FAIL:
++this.numFailed;
break;
}
}
getCompressedResults(maxWidth: number): number[] {
const total = Math.max(this.numTotal, this.results.length);
const width = Math.min(maxWidth, total);
const resultList: number[] = [];
for (let i = 0; i < this.results.length; ++i) {
const pos = Math.floor(i / total * width);
resultList[pos] = Math.max(resultList[pos] || PASS, this.results[i]);
}
return resultList;
}
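// Worked example: squeezing 10 results into a 5-slot bar maps two results to
// each slot and keeps the worst of the pair (FAIL > SKIP > PASS), so
// [P,P,F,P,S,P,P,P,P,P] compresses to [P,F,S,P,P] — a failure is never
// hidden by neighbouring passes.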
}
const PAD = new Array(100).join(' ');
const SPINNER_STATES = ['/', '-', '\\', '|'];
const PASS = 0;
const SKIP = 1;
const FAIL = 2;
const BROWSERS = {
chrome: 'Chr', | random_line_split |
||
Pretty.ts | ._header = '';
this._reports = {};
this._log = [];
this.tunnelState = '';
this._renderTimeout = undefined;
this._total = new Report();
}
@eventHandler()
runStart() {
this._header = this.executor.config.name;
this._charm = this._charm || this._newCharm();
const resize = () => {
this.dimensions.width = (<any>process.stdout).columns || 80;
this.dimensions.height = (<any>process.stdout).rows || 24;
};
resize();
process.stdout.on('resize', resize);
const rerender = () => {
this._charm.erase('screen').position(0, 0);
this._render();
this._renderTimeout = setTimeout(rerender, 200);
};
rerender();
}
@eventHandler()
runEnd() {
const charm = this._charm;
clearTimeout(this._renderTimeout);
charm.erase('screen').position(0, 0);
// write a full log of errors
// Sort logs: pass < deprecated < skip < errors < fail
const ERROR_LOG_WEIGHT = { '!': 4, '×': 3, '~': 2, '⚠': 1, '✓': 0 };
const logs = this._log.sort((a: any, b: any) => {
a = (<{ [key: string]: any }>ERROR_LOG_WEIGHT)[a.charAt(0)] || 0;
b = (<{ [key: string]: any }>ERROR_LOG_WEIGHT)[b.charAt(0)] || 0;
return a - b;
}).map(line => this._getColor(line) + line).join('\n');
charm.write(logs);
charm.write('\n\n');
// Display the pretty results
this._render(true);
// Display coverage information
if (this._total.coverage.files().length > 0) {
charm.write('\n');
(new TextReport({
watermarks: this.watermarks
})).writeReport(this._total.coverage, true);
}
}
@eventHandler()
coverage(data: CoverageMessage) {
const reporter = this._reports[data.sessionId];
reporter && reporter.coverage.add(data.coverage);
this._total.coverage.add(data.coverage);
}
@eventHandler()
suiteStart(suite: Suite) {
if (!suite.hasParent) {
const nu | andler()
suiteEnd(suite: Suite) {
if (suite.error) {
this._record(suite.sessionId, FAIL);
const message = '! ' + suite.id;
this._log.push(message + '\n' + this.formatter.format(suite.error));
}
}
@eventHandler()
testEnd(test: Test) {
if (test.skipped) {
this._record(test.sessionId, SKIP);
this._log.push('~ ' + test.id + ': ' + (test.skipped || 'skipped'));
}
else if (test.error) {
const message = '× ' + test.id;
this._record(test.sessionId, FAIL);
this._log.push(message + '\n' + this.formatter.format(test.error));
}
else {
this._record(test.sessionId, PASS);
this._log.push('✓ ' + test.id);
}
}
@eventHandler()
tunnelDownloadProgress(message: TunnelMessage) {
const progress = message.progress;
this.tunnelState = 'Downloading ' + (progress.received / progress.total * 100).toFixed(2) + '%';
}
@eventHandler()
tunnelStatus(message: TunnelMessage) {
this.tunnelState = message.status;
}
@eventHandler()
error(error: Error) {
const message = '! ' + error.message;
this._log.push(message + '\n' + this.formatter.format(error));
// stop the render timeout on a fatal error so Intern can exit
clearTimeout(this._renderTimeout);
}
@eventHandler()
deprecated(message: DeprecationMessage) {
let text = '⚠ ' + message.original + ' is deprecated.';
if (message.replacement) {
text += ' Use ' + message.replacement + ' instead.';
}
if (message.message) {
text += ' ' + message.message;
}
this._log.push(text);
}
/**
* Return the reporter for a given session, creating it if necessary.
*/
private _getReporter(suite: Suite): Report {
if (!this._reports[suite.sessionId]) {
this._reports[suite.sessionId] = new Report(suite.remote && suite.remote.environmentType.toString());
}
return this._reports[suite.sessionId];
}
/**
* Create the charm instance used by this reporter.
*/
private _newCharm(): charm.Charm {
const c = charm();
c.pipe(process.stdout);
return c;
}
private _record(sessionId: string, result: number) {
const reporter = this._reports[sessionId];
reporter && reporter.record(result);
this._total.record(result);
}
/**
* Render the progress bar
* [✔︎~✔︎×✔︎✔︎✔︎✔︎✔︎✔︎] 99/100
* @param report the report data to render
* @param width the maximum width for the entire progress bar
*/
private _drawProgressBar(report: Report, width: number) {
const spinnerCharacter = SPINNER_STATES[this._spinnerOffset];
const charm = this._charm;
if (!report.numTotal) {
charm.write('Pending');
return;
}
const totalTextSize = String(report.numTotal).length;
const remainingWidth = Math.max(width - 4 - (totalTextSize * 2), 1);
const barSize = Math.min(remainingWidth, report.numTotal, this.maxProgressBarWidth);
const results = report.getCompressedResults(barSize);
charm.write('[' + results.map(value => this._getColor(value)).join(''));
charm.display('reset').write(fit(spinnerCharacter, barSize - results.length) + '] ' +
fit(report.finished, totalTextSize, true) + '/' + report.numTotal);
}
/**
* Render a single line
* TITLE: [✔︎~✔︎×✔︎✔︎✔︎✔︎✔︎✔︎] 100/100, 2 fail, 1 skip
* TODO split this into two lines. The first line will display the
* title, OS, and code coverage; the second will show the progress bar
*/
private _drawSessionReport(report: Report) {
const charm = this._charm;
const titleWidth = this.titleWidth;
const leftOfBar = fit(this._abbreviateEnvironment(report.environment).slice(0, titleWidth - 2) + ': ',
titleWidth);
const rightOfBar = '' +
(report.numFailed ? ', ' + report.numFailed + ' fail' : '') +
(report.numSkipped ? ', ' + report.numSkipped + ' skip' : '');
const barWidth = this.dimensions.width - rightOfBar.length - titleWidth;
charm.write(leftOfBar);
this._drawProgressBar(report, barWidth);
charm.write(rightOfBar + '\n');
}
/**
* Abbreviate the environment information for rendering
* @param env the test environment
* @returns {string} abbreviated environment information
*/
private _abbreviateEnvironment(env: any): string {
const browser = (<{ [key: string]: any }>BROWSERS)[env.browserName.toLowerCase()] || env.browserName.slice(0, 4);
const result = [browser];
if (env.version) {
let version = String(env.version);
if (version.indexOf('.') > -1) {
version = version.slice(0, version.indexOf('.'));
}
result.push(version);
}
if (env.platform) {
result.push(env.platform.slice(0, 3));
}
return result.join(' ');
}
private _render(omitLogs: boolean = false) {
const charm = this._charm;
const numReporters = Object.keys(this._reports).length;
const logLength = this.dimensions.height - numReporters - 4 /* last line & total */ -
(this.tunnelState ? 2 : 0) - (numReporters ? 1 : 0) - (this._header ? 1 : 0);
this._spinnerOffset = (++this._spinnerOffset) % SPINNER_STATES.length;
charm.display('reset');
if (this._header) {
charm.display('bright');
charm.write(this._header + '\n');
charm.display('reset');
}
this.tunnelState && charm.write('Tunnel: ' + this.tunnelState + '\n\n');
this._drawTotalReporter(this._total);
// TODO if there is not room to render all | mTests = suite.numTests;
this._total.numTotal += numTests;
if (suite.sessionId) {
this._getReporter(suite).numTotal += numTests;
}
}
}
@eventH | conditional_block |
Pretty.ts | ._header = '';
this._reports = {};
this._log = [];
this.tunnelState = '';
this._renderTimeout = undefined;
this._total = new Report();
}
@eventHandler()
runStart() {
this._header = this.executor.config.name;
this._charm = this._charm || this._newCharm();
const resize = () => {
this.dimensions.width = (<any>process.stdout).columns || 80;
this.dimensions.height = (<any>process.stdout).rows || 24;
};
resize();
process.stdout.on('resize', resize);
const rerender = () => {
this._charm.erase('screen').position(0, 0);
this._render();
this._renderTimeout = setTimeout(rerender, 200);
};
rerender();
}
@eventHandler()
runEnd() {
const charm = this._charm;
clearTimeout(this._renderTimeout);
charm.erase('screen').position(0, 0);
// write a full log of errors
// Sort logs: pass < deprecated < skip < errors < fail
const ERROR_LOG_WEIGHT = { '!': 4, '×': 3, '~': 2, '⚠': 1, '✓': 0 };
const logs = this._log.sort((a: any, b: any) => {
a = (<{ [key: string]: any }>ERROR_LOG_WEIGHT)[a.charAt(0)] || 0;
b = (<{ [key: string]: any }>ERROR_LOG_WEIGHT)[b.charAt(0)] || 0;
return a - b;
}).map(line => this._getColor(line) + line).join('\n');
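// e.g. ['× t1 failed', '✓ t2 passed', '~ t3 skipped'] sorts to
// ['✓ t2 passed', '~ t3 skipped', '× t1 failed']: passes surface first,
// fatal errors ('!') sink to the end.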
charm.write(logs);
charm.write('\n\n');
// Display the pretty results
this._render(true);
// Display coverage information
if (this._total.coverage.files().length > 0) {
charm.write('\n');
(new TextReport({
watermarks: this.watermarks
})).writeReport(this._total.coverage, true);
}
}
@eventHandler()
coverage(data: CoverageMessage) {
const reporter = this._reports[data.sessionId];
reporter && reporter.coverage.add(data.coverage);
this._total.coverage.add(data.coverage);
}
@eventHandler()
suiteStart(su | ) {
if (!suite.hasParent) {
const numTests = suite.numTests;
this._total.numTotal += numTests;
if (suite.sessionId) {
this._getReporter(suite).numTotal += numTests;
}
}
}
@eventHandler()
suiteEnd(suite: Suite) {
if (suite.error) {
this._record(suite.sessionId, FAIL);
const message = '! ' + suite.id;
this._log.push(message + '\n' + this.formatter.format(suite.error));
}
}
@eventHandler()
testEnd(test: Test) {
if (test.skipped) {
this._record(test.sessionId, SKIP);
this._log.push('~ ' + test.id + ': ' + (test.skipped || 'skipped'));
}
else if (test.error) {
const message = '× ' + test.id;
this._record(test.sessionId, FAIL);
this._log.push(message + '\n' + this.formatter.format(test.error));
}
else {
this._record(test.sessionId, PASS);
this._log.push('✓ ' + test.id);
}
}
@eventHandler()
tunnelDownloadProgress(message: TunnelMessage) {
const progress = message.progress;
this.tunnelState = 'Downloading ' + (progress.received / progress.total * 100).toFixed(2) + '%';
}
@eventHandler()
tunnelStatus(message: TunnelMessage) {
this.tunnelState = message.status;
}
@eventHandler()
error(error: Error) {
const message = '! ' + error.message;
this._log.push(message + '\n' + this.formatter.format(error));
// stop the render timeout on a fatal error so Intern can exit
clearTimeout(this._renderTimeout);
}
@eventHandler()
deprecated(message: DeprecationMessage) {
let text = '⚠ ' + message.original + ' is deprecated.';
if (message.replacement) {
text += ' Use ' + message.replacement + ' instead.';
}
if (message.message) {
text += ' ' + message.message;
}
this._log.push(text);
}
/**
* Return the reporter for a given session, creating it if necessary.
*/
private _getReporter(suite: Suite): Report {
if (!this._reports[suite.sessionId]) {
this._reports[suite.sessionId] = new Report(suite.remote && suite.remote.environmentType.toString());
}
return this._reports[suite.sessionId];
}
/**
* Create the charm instance used by this reporter.
*/
private _newCharm(): charm.Charm {
const c = charm();
c.pipe(process.stdout);
return c;
}
private _record(sessionId: string, result: number) {
const reporter = this._reports[sessionId];
reporter && reporter.record(result);
this._total.record(result);
}
/**
* Render the progress bar
* [✔︎~✔︎×✔︎✔︎✔︎✔︎✔︎✔︎] 99/100
* @param report the report data to render
* @param width the maximum width for the entire progress bar
*/
private _drawProgressBar(report: Report, width: number) {
const spinnerCharacter = SPINNER_STATES[this._spinnerOffset];
const charm = this._charm;
if (!report.numTotal) {
charm.write('Pending');
return;
}
const totalTextSize = String(report.numTotal).length;
const remainingWidth = Math.max(width - 4 - (totalTextSize * 2), 1);
const barSize = Math.min(remainingWidth, report.numTotal, this.maxProgressBarWidth);
const results = report.getCompressedResults(barSize);
charm.write('[' + results.map(value => this._getColor(value)).join(''));
charm.display('reset').write(fit(spinnerCharacter, barSize - results.length) + '] ' +
fit(report.finished, totalTextSize, true) + '/' + report.numTotal);
}
/**
* Render a single line
* TITLE: [✔︎~✔︎×✔︎✔︎✔︎✔︎✔︎✔︎] 100/100, 2 fail, 1 skip
* TODO split this into two lines. The first line will display the
* title, OS, and code coverage; the second will show the progress bar
*/
private _drawSessionReport(report: Report) {
const charm = this._charm;
const titleWidth = this.titleWidth;
const leftOfBar = fit(this._abbreviateEnvironment(report.environment).slice(0, titleWidth - 2) + ': ',
titleWidth);
const rightOfBar = '' +
(report.numFailed ? ', ' + report.numFailed + ' fail' : '') +
(report.numSkipped ? ', ' + report.numSkipped + ' skip' : '');
const barWidth = this.dimensions.width - rightOfBar.length - titleWidth;
charm.write(leftOfBar);
this._drawProgressBar(report, barWidth);
charm.write(rightOfBar + '\n');
}
/**
* Abbreviate the environment information for rendering
* @param env the test environment
* @returns {string} abbreviated environment information
*/
private _abbreviateEnvironment(env: any): string {
const browser = (<{ [key: string]: any }>BROWSERS)[env.browserName.toLowerCase()] || env.browserName.slice(0, 4);
const result = [browser];
if (env.version) {
let version = String(env.version);
if (version.indexOf('.') > -1) {
version = version.slice(0, version.indexOf('.'));
}
result.push(version);
}
if (env.platform) {
result.push(env.platform.slice(0, 3));
}
return result.join(' ');
}
private _render(omitLogs: boolean = false) {
const charm = this._charm;
const numReporters = Object.keys(this._reports).length;
const logLength = this.dimensions.height - numReporters - 4 /* last line & total */ -
(this.tunnelState ? 2 : 0) - (numReporters ? 1 : 0) - (this._header ? 1 : 0);
this._spinnerOffset = (++this._spinnerOffset) % SPINNER_STATES.length;
charm.display('reset');
if (this._header) {
charm.display('bright');
charm.write(this._header + '\n');
charm.display('reset');
}
this.tunnelState && charm.write('Tunnel: ' + this.tunnelState + '\n\n');
this._drawTotalReporter(this._total);
// TODO if there is not room to render all | ite: Suite | identifier_name |
docker.go | err := tlsconfig.Client(options)
if err != nil {
panic(err)
}
httpClient := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
// ForceAttemptHTTP2: true, TODO: uncomment with Go 1.13
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: tlsc,
},
CheckRedirect: docker.CheckRedirect,
}
c, err = docker.NewClient(common.Flags.Docker.Host, "", httpClient, nil)
} else {
c, err = docker.NewClient(common.Flags.Docker.Host, "", nil, nil)
}
if err != nil {
panic(err)
}
client = c
}
}
// ContainerRunningByImageName returns true if a container, built
// on the given image, is running
func ContainerRunningByImageName(name string) (string, bool, error) {
containers, err := getContainers()
if err != nil {
return "", false, err
}
for _, c := range containers {
if c.Image == name || c.Image+":latest" == name {
return c.ID, true, nil
}
}
return "", false, nil
}
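// For example, a container whose image is reported as "installer" matches
// both ContainerRunningByImageName("installer") and
// ContainerRunningByImageName("installer:latest").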
// containerRunningById returns true if a container with the given id is running
func containerRunningById(id string) (bool, error) {
containers, err := getContainers()
if err != nil {
return false, err
}
for _, c := range containers {
if c.ID == id {
return true, nil
}
}
return false, nil
}
// StopContainerById stops and removes the container corresponding to the provided id
func StopContainerById(id string, done chan bool) error {
if err := client.ContainerStop(context.Background(), id, nil); err != nil {
return err
}
if err := client.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{}); err != nil {
return err
}
for {
common.Logger.Printf(common.LOG_WAITING_STOP)
time.Sleep(500 * time.Millisecond)
stillRunning, err := containerRunningById(id)
if err != nil {
return err
}
if !stillRunning {
common.Logger.Printf(common.LOG_STOPPED)
done <- true
return nil
}
}
}
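// Hypothetical usage sketch (assumption): StopContainerById blocks while polling,
// so callers usually run it in a goroutine, with an id obtained for example from
// ContainerRunningByImageName, and wait on the done channel.
//
//	done := make(chan bool)
//	go func() {
//		if err := StopContainerById(id, done); err != nil {
//			common.Logger.Println(err.Error())
//		}
//	}()
//	<-done // unblocks once the container is confirmed gone
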
// StartContainer builds or updates a container based on the provided image name.
// Once built, the container will be started.
// The method will wait until the container is started and
// will notify it using the channel
func StartContainer(url string, imageName string, done chan bool, ef util.ExchangeFolder, a action.ActionID) (int, error) {
envVar := []string{}
envVar = append(envVar, util.StarterEnvVariableKey+"="+url)
envVar = append(envVar, util.StarterEnvNameVariableKey+"="+common.Flags.Descriptor.File)
envVar = append(envVar, util.StarterEnvLoginVariableKey+"="+common.Flags.Descriptor.Login)
envVar = append(envVar, util.StarterEnvPasswordVariableKey+"="+common.Flags.Descriptor.Password) | envVar = append(envVar, util.ActionEnvVariableSkip+"="+strconv.Itoa(common.Flags.Skipping.SkippingLevel()))
envVar = append(envVar, util.ActionEnvVariableKey+"="+a.String())
envVar = append(envVar, "http_proxy="+common.Flags.Proxy.HTTP)
envVar = append(envVar, "https_proxy="+common.Flags.Proxy.HTTPS)
envVar = append(envVar, "no_proxy="+common.Flags.Proxy.Exclusions)
common.Logger.Printf(common.LOG_PASSING_CONTAINER_ENVARS, envVar)
// Check if we need to load parameters from the command line
if common.Flags.Descriptor.ParamFile != "" {
copyExtraParameters(common.Flags.Descriptor.ParamFile, ef)
}
startedAt := time.Now().UTC()
startedAt = startedAt.Add(time.Second * -2)
resp, err := client.ContainerCreate(context.Background(), &container.Config{
Image: imageName,
WorkingDir: util.InstallerVolume,
Env: envVar,
}, &container.HostConfig{
Mounts: []mount.Mount{
{
Type: mount.TypeBind,
Source: ef.Location.AdaptedPath(),
Target: util.InstallerVolume,
},
{
Type: mount.TypeBind,
Source: "/var/run/docker.sock",
Target: "/var/run/docker.sock",
},
},
}, nil, "")
if err != nil {
return 0, err
}
// Chan used to turn off the rolling log
stopLogReading := make(chan bool)
// Rolling output of the container logs
go func(start time.Time, exit chan bool) {
logMap := make(map[string]string)
// Trick to avoid tracing the same log line twice
notExist := func(s string) (bool, string) {
tab := strings.Split(s, util.InstallerLogPrefix)
if len(tab) > 1 {
sTrim := strings.Trim(tab[1], " ")
if _, ok := logMap[sTrim]; ok {
return false, ""
}
logMap[sTrim] = ""
return true, util.InstallerLogPrefix + sTrim
} else {
return true, s
}
}
// Request to get the logs content from the container
req := func(sr string) {
out, err := client.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{Since: sr, ShowStdout: true, ShowStderr: true})
if err != nil {
stopLogReading <- true
}
s := bufio.NewScanner(out)
for s.Scan() {
str := s.Text()
if b, sTrim := notExist(str); b {
idx := strings.Index(sTrim, util.FeedbackPrefix)
if idx != -1 {
fU := util.FeedbackUpdate{}
err = json.Unmarshal([]byte(sTrim[idx+len(util.FeedbackPrefix):]), &fU)
if err != nil {
common.Logger.Println("Unable to parse progress update: " + err.Error())
} else if !common.Flags.Logging.ShouldOutputLogs() {
switch fU.Type {
case "I":
common.CliFeedbackNotifier.Info(fU.Message)
break
case "E":
common.CliFeedbackNotifier.Error(fU.Message)
break
case "P":
common.CliFeedbackNotifier.ProgressG(fU.Key, fU.Goal, fU.Message)
break
case "D":
common.CliFeedbackNotifier.Detail(fU.Message)
break
}
}
} else if common.Flags.Logging.ShouldOutputLogs() {
fmt.Println(sTrim)
}
}
}
err = out.Close()
if err != nil {
common.Logger.Println("Unable to close container log reader: " + err.Error())
}
}
Loop:
for {
select {
case <-exit:
// Last call to be sure to get the end of the logs content
now := time.Now()
now = now.Add(time.Second * -1)
sinceReq := strconv.FormatInt(now.Unix(), 10)
req(sinceReq)
break Loop
default:
// Running call to trace the container logs every 500ms
sinceReq := strconv.FormatInt(start.Unix(), 10)
start = start.Add(time.Millisecond * 500)
req(sinceReq)
time.Sleep(time.Millisecond * 500)
}
}
}(startedAt, stopLogReading)
defer func() {
if err := LogAllFromContainer(resp.ID, ef, done); err != nil {
common.Logger.Println("Unable to fetch logs from container")
}
}()
if err := client.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
common.CliFeedbackNotifier.Error("Unable to start container: %s", err.Error())
return 0, err
}
statusCh, errCh := client.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
stopLogReading <- true
return 0, err
case status := <-statusCh:
stopLogReading <- true
return int(status.StatusCode), nil
}
}
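// Hypothetical caller sketch (assumption; apiURL, imageName, ef and a come from
// the caller's context): StartContainer blocks until the container exits and
// returns its exit code. The deferred LogAllFromContainer sends on done before
// StartContainer returns, so an unbuffered channel with a synchronous caller
// would deadlock; a buffered channel avoids that.
//
//	done := make(chan bool, 1) // buffered so the deferred log flush never blocks
//	code, err := StartContainer(apiURL, imageName, done, ef, a) // a is some action.ActionID
//	if err == nil {
//		<-done // wait for the container log file to be written
//		common.Logger.Printf("installer finished with exit code %d", code)
//	}
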
func LogAllFromContainer(id string, ef util.ExchangeFolder, done chan bool) error {
out, err := client.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
logFile, err := containerLog(ef)
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
defer logFile.Close | envVar = append(envVar, util.StarterVerbosityVariableKey+"="+strconv.Itoa(common.Flags.Logging.VerbosityLevel())) | random_line_split |
docker.go | }
}
// ContainerRunningByImageName returns true if a container, built
// on the given image, is running
func ContainerRunningByImageName(name string) (string, bool, error) {
containers, err := getContainers()
if err != nil {
return "", false, err
}
for _, c := range containers {
if c.Image == name || c.Image+":latest" == name {
return c.ID, true, nil
}
}
return "", false, nil
}
// containerRunningById returns true if a container with the given id is running
func containerRunningById(id string) (bool, error) {
containers, err := getContainers()
if err != nil {
return false, err
}
for _, c := range containers {
if c.ID == id {
return true, nil
}
}
return false, nil
}
// StopContainerById stops and removes the container corresponding to the provided id
func StopContainerById(id string, done chan bool) error {
if err := client.ContainerStop(context.Background(), id, nil); err != nil {
return err
}
if err := client.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{}); err != nil {
return err
}
for {
common.Logger.Printf(common.LOG_WAITING_STOP)
time.Sleep(500 * time.Millisecond)
stillRunning, err := containerRunningById(id)
if err != nil {
return err
}
if !stillRunning {
common.Logger.Printf(common.LOG_STOPPED)
done <- true
return nil
}
}
}
// StartContainer builds or updates a container based on the provided image name.
// Once built, the container will be started.
// The method will wait until the container is started and
// will notify it using the channel
func StartContainer(url string, imageName string, done chan bool, ef util.ExchangeFolder, a action.ActionID) (int, error) {
envVar := []string{}
envVar = append(envVar, util.StarterEnvVariableKey+"="+url)
envVar = append(envVar, util.StarterEnvNameVariableKey+"="+common.Flags.Descriptor.File)
envVar = append(envVar, util.StarterEnvLoginVariableKey+"="+common.Flags.Descriptor.Login)
envVar = append(envVar, util.StarterEnvPasswordVariableKey+"="+common.Flags.Descriptor.Password)
envVar = append(envVar, util.StarterVerbosityVariableKey+"="+strconv.Itoa(common.Flags.Logging.VerbosityLevel()))
envVar = append(envVar, util.ActionEnvVariableSkip+"="+strconv.Itoa(common.Flags.Skipping.SkippingLevel()))
envVar = append(envVar, util.ActionEnvVariableKey+"="+a.String())
envVar = append(envVar, "http_proxy="+common.Flags.Proxy.HTTP)
envVar = append(envVar, "https_proxy="+common.Flags.Proxy.HTTPS)
envVar = append(envVar, "no_proxy="+common.Flags.Proxy.Exclusions)
common.Logger.Printf(common.LOG_PASSING_CONTAINER_ENVARS, envVar)
// Check if we need to load parameters from the command line
if common.Flags.Descriptor.ParamFile != "" {
copyExtraParameters(common.Flags.Descriptor.ParamFile, ef)
}
startedAt := time.Now().UTC()
startedAt = startedAt.Add(time.Second * -2)
resp, err := client.ContainerCreate(context.Background(), &container.Config{
Image: imageName,
WorkingDir: util.InstallerVolume,
Env: envVar,
}, &container.HostConfig{
Mounts: []mount.Mount{
{
Type: mount.TypeBind,
Source: ef.Location.AdaptedPath(),
Target: util.InstallerVolume,
},
{
Type: mount.TypeBind,
Source: "/var/run/docker.sock",
Target: "/var/run/docker.sock",
},
},
}, nil, "")
if err != nil {
return 0, err
}
// Chan used to turn off the rolling log
stopLogReading := make(chan bool)
// Rolling output of the container logs
go func(start time.Time, exit chan bool) {
logMap := make(map[string]string)
// Trick to avoid tracing the same log line twice
notExist := func(s string) (bool, string) {
tab := strings.Split(s, util.InstallerLogPrefix)
if len(tab) > 1 {
sTrim := strings.Trim(tab[1], " ")
if _, ok := logMap[sTrim]; ok {
return false, ""
}
logMap[sTrim] = ""
return true, util.InstallerLogPrefix + sTrim
} else {
return true, s
}
}
// Request to get the logs content from the container
req := func(sr string) {
out, err := client.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{Since: sr, ShowStdout: true, ShowStderr: true})
if err != nil {
stopLogReading <- true
}
s := bufio.NewScanner(out)
for s.Scan() {
str := s.Text()
if b, sTrim := notExist(str); b {
idx := strings.Index(sTrim, util.FeedbackPrefix)
if idx != -1 {
fU := util.FeedbackUpdate{}
err = json.Unmarshal([]byte(sTrim[idx+len(util.FeedbackPrefix):]), &fU)
if err != nil {
common.Logger.Println("Unable to parse progress update: " + err.Error())
} else if !common.Flags.Logging.ShouldOutputLogs() {
switch fU.Type {
case "I":
common.CliFeedbackNotifier.Info(fU.Message)
break
case "E":
common.CliFeedbackNotifier.Error(fU.Message)
break
case "P":
common.CliFeedbackNotifier.ProgressG(fU.Key, fU.Goal, fU.Message)
break
case "D":
common.CliFeedbackNotifier.Detail(fU.Message)
break
}
}
} else if common.Flags.Logging.ShouldOutputLogs() {
fmt.Println(sTrim)
}
}
}
err = out.Close()
if err != nil {
common.Logger.Println("Unable to close container log reader: " + err.Error())
}
}
Loop:
for {
select {
case <-exit:
// Last call to be sure to get the end of the logs content
now := time.Now()
now = now.Add(time.Second * -1)
sinceReq := strconv.FormatInt(now.Unix(), 10)
req(sinceReq)
break Loop
default:
// Running call to trace the container logs every 500ms
sinceReq := strconv.FormatInt(start.Unix(), 10)
start = start.Add(time.Millisecond * 500)
req(sinceReq)
time.Sleep(time.Millisecond * 500)
}
}
}(startedAt, stopLogReading)
defer func() {
if err := LogAllFromContainer(resp.ID, ef, done); err != nil {
common.Logger.Println("Unable to fetch logs from container")
}
}()
if err := client.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
common.CliFeedbackNotifier.Error("Unable to start container: %s", err.Error())
return 0, err
}
statusCh, errCh := client.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
stopLogReading <- true
return 0, err
case status := <-statusCh:
stopLogReading <- true
return int(status.StatusCode), nil
}
}
func LogAllFromContainer(id string, ef util.ExchangeFolder, done chan bool) error {
out, err := client.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
logFile, err := containerLog(ef)
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
defer logFile.Close()
_, err = stdcopy.StdCopy(logFile, logFile, out)
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
// We are done!
common.Logger.Printf(common.LOG_CONTAINER_LOG_WRITTEN, logFile.Name())
done <- true
return nil
}
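// Hypothetical usage sketch (assumption): LogAllFromContainer signals done on
// every path, success or failure, so a caller can always block on the channel.
//
//	done := make(chan bool)
//	go LogAllFromContainer(containerID, ef, done)
//	<-done // the log file has been written, or fetching was abandoned
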
// getContainers returns the detail of all running containers
func getContainers() ([]types.Container, error) {
containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
return []types.Container{}, err
}
return containers, nil
}
// imageExistsByName returns true if an image corresponding
// to the given name has already been downloaded
func imageExistsByName(name string) (bool, error) {
images, err := getImages()
if err != nil {
return false, err
}
for _, image := range images {
for _, tag := range image.RepoTags {
if tag == name {
return true, nil
}
}
}
return false, nil
}
// getImages returns the summary of all images already downloaded
func | getImages | identifier_name |
|
docker.go | Var, util.StarterEnvVariableKey+"="+url)
envVar = append(envVar, util.StarterEnvNameVariableKey+"="+common.Flags.Descriptor.File)
envVar = append(envVar, util.StarterEnvLoginVariableKey+"="+common.Flags.Descriptor.Login)
envVar = append(envVar, util.StarterEnvPasswordVariableKey+"="+common.Flags.Descriptor.Password)
envVar = append(envVar, util.StarterVerbosityVariableKey+"="+strconv.Itoa(common.Flags.Logging.VerbosityLevel()))
envVar = append(envVar, util.ActionEnvVariableSkip+"="+strconv.Itoa(common.Flags.Skipping.SkippingLevel()))
envVar = append(envVar, util.ActionEnvVariableKey+"="+a.String())
envVar = append(envVar, "http_proxy="+common.Flags.Proxy.HTTP)
envVar = append(envVar, "https_proxy="+common.Flags.Proxy.HTTPS)
envVar = append(envVar, "no_proxy="+common.Flags.Proxy.Exclusions)
common.Logger.Printf(common.LOG_PASSING_CONTAINER_ENVARS, envVar)
// Check if we need to load parameters from the command line
if common.Flags.Descriptor.ParamFile != "" {
copyExtraParameters(common.Flags.Descriptor.ParamFile, ef)
}
startedAt := time.Now().UTC()
startedAt = startedAt.Add(time.Second * -2)
resp, err := client.ContainerCreate(context.Background(), &container.Config{
Image: imageName,
WorkingDir: util.InstallerVolume,
Env: envVar,
}, &container.HostConfig{
Mounts: []mount.Mount{
{
Type: mount.TypeBind,
Source: ef.Location.AdaptedPath(),
Target: util.InstallerVolume,
},
{
Type: mount.TypeBind,
Source: "/var/run/docker.sock",
Target: "/var/run/docker.sock",
},
},
}, nil, "")
if err != nil {
return 0, err
}
// Chan used to turn off the rolling log
stopLogReading := make(chan bool)
// Rolling output of the container logs
go func(start time.Time, exit chan bool) {
logMap := make(map[string]string)
// Trick to avoid tracing the same log line twice
notExist := func(s string) (bool, string) {
tab := strings.Split(s, util.InstallerLogPrefix)
if len(tab) > 1 {
sTrim := strings.Trim(tab[1], " ")
if _, ok := logMap[sTrim]; ok {
return false, ""
}
logMap[sTrim] = ""
return true, util.InstallerLogPrefix + sTrim
} else {
return true, s
}
}
// Request to get the logs content from the container
req := func(sr string) {
out, err := client.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{Since: sr, ShowStdout: true, ShowStderr: true})
if err != nil {
stopLogReading <- true
}
s := bufio.NewScanner(out)
for s.Scan() {
str := s.Text()
if b, sTrim := notExist(str); b {
idx := strings.Index(sTrim, util.FeedbackPrefix)
if idx != -1 {
fU := util.FeedbackUpdate{}
err = json.Unmarshal([]byte(sTrim[idx+len(util.FeedbackPrefix):]), &fU)
if err != nil {
common.Logger.Println("Unable to parse progress update: " + err.Error())
} else if !common.Flags.Logging.ShouldOutputLogs() {
switch fU.Type {
case "I":
common.CliFeedbackNotifier.Info(fU.Message)
break
case "E":
common.CliFeedbackNotifier.Error(fU.Message)
break
case "P":
common.CliFeedbackNotifier.ProgressG(fU.Key, fU.Goal, fU.Message)
break
case "D":
common.CliFeedbackNotifier.Detail(fU.Message)
break
}
}
} else if common.Flags.Logging.ShouldOutputLogs() {
fmt.Println(sTrim)
}
}
}
err = out.Close()
if err != nil {
common.Logger.Println("Unable to close container log reader: " + err.Error())
}
}
Loop:
for {
select {
case <-exit:
// Last call to be sure to get the end of the logs content
now := time.Now()
now = now.Add(time.Second * -1)
sinceReq := strconv.FormatInt(now.Unix(), 10)
req(sinceReq)
break Loop
default:
// Running call to trace the container logs every 500ms
sinceReq := strconv.FormatInt(start.Unix(), 10)
start = start.Add(time.Millisecond * 500)
req(sinceReq)
time.Sleep(time.Millisecond * 500)
}
}
}(startedAt, stopLogReading)
defer func() {
if err := LogAllFromContainer(resp.ID, ef, done); err != nil {
common.Logger.Println("Unable to fetch logs from container")
}
}()
if err := client.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
common.CliFeedbackNotifier.Error("Unable to start container: %s", err.Error())
return 0, err
}
statusCh, errCh := client.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
stopLogReading <- true
return 0, err
case status := <-statusCh:
stopLogReading <- true
return int(status.StatusCode), nil
}
}
func LogAllFromContainer(id string, ef util.ExchangeFolder, done chan bool) error {
out, err := client.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
logFile, err := containerLog(ef)
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
defer logFile.Close()
_, err = stdcopy.StdCopy(logFile, logFile, out)
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
// We are done!
common.Logger.Printf(common.LOG_CONTAINER_LOG_WRITTEN, logFile.Name())
done <- true
return nil
}
// getContainers returns the detail of all running containers
func getContainers() ([]types.Container, error) {
containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
return []types.Container{}, err
}
return containers, nil
}
// imageExistsByName returns true if an image corresponding
// to the given name has already been downloaded
func imageExistsByName(name string) (bool, error) {
images, err := getImages()
if err != nil {
return false, err
}
for _, image := range images {
for _, tag := range image.RepoTags {
if tag == name {
return true, nil
}
}
}
return false, nil
}
// getImages returns the summary of all images already downloaded
func getImages() ([]types.ImageSummary, error) {
images, err := client.ImageList(context.Background(), types.ImageListOptions{})
if err != nil {
return []types.ImageSummary{}, err
}
return images, nil
}
// ImagePull pulls the image corresponding to the given name
// and waits for the download to be completed.
//
// The completion of the download will be notified using the channel
func ImagePull(taggedName string, done chan bool, failed chan error) {
img, err := imageExistsByName(taggedName)
if err != nil {
failed <- err
return
}
if !img {
if r, err := client.ImagePull(context.Background(), taggedName, types.ImagePullOptions{}); err != nil {
failed <- err
return
} else {
defer r.Close()
}
common.CliFeedbackNotifier.Progress("cli.docker.download", "Downloading installer image")
for {
common.Logger.Printf(common.LOG_WAITING_DOWNLOAD)
time.Sleep(1000 * time.Millisecond)
img, err := imageExistsByName(taggedName)
if err != nil {
failed <- err
return
}
if img {
common.Logger.Printf(common.LOG_DOWNLOAD_COMPLETED)
break
}
}
}
done <- true
}
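// Hypothetical usage sketch (assumption): ImagePull reports through two
// channels, so callers typically select on both.
//
//	done := make(chan bool)
//	failed := make(chan error)
//	go ImagePull("ekara/installer:latest", done, failed) // image name is illustrative
//	select {
//	case <-done:
//		common.Logger.Printf("image available locally")
//	case err := <-failed:
//		common.Logger.Println(err.Error())
//	}
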
func copyExtraParameters(file string, ef util.ExchangeFolder) error {
if _, err := os.Stat(file); err != nil {
if os.IsNotExist(err) {
common.Logger.Fatalf(common.ERROR_UNREACHABLE_PARAM_FILE, file)
}
}
b, err := ioutil.ReadFile(file)
if err != nil {
return err
}
err = ef.Location.Write(b, util.ExternalVarsFilename)
if err != nil {
return err
}
return nil
}
func containerLog(ef util.ExchangeFolder) (*os.File, error) {
f, e := os.Create(filepath.Join(ef.Output.Path(), common.Flags.Logging.File))
if e != nil | {
return nil, e
} | conditional_block |
|
docker.go | err := tlsconfig.Client(options)
if err != nil {
panic(err)
}
httpClient := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
// ForceAttemptHTTP2: true, TODO: uncomment with Go 1.13
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: tlsc,
},
CheckRedirect: docker.CheckRedirect,
}
c, err = docker.NewClient(common.Flags.Docker.Host, "", httpClient, nil)
} else {
c, err = docker.NewClient(common.Flags.Docker.Host, "", nil, nil)
}
if err != nil {
panic(err)
}
client = c
}
}
// ContainerRunningByImageName returns true if a container, built
// on the given image, is running
func ContainerRunningByImageName(name string) (string, bool, error) |
// containerRunningById returns true if a container with the given id is running
func containerRunningById(id string) (bool, error) {
containers, err := getContainers()
if err != nil {
return false, err
}
for _, c := range containers {
if c.ID == id {
return true, nil
}
}
return false, nil
}
// StopContainerById stops and removes the container corresponding to the provided id
func StopContainerById(id string, done chan bool) error {
if err := client.ContainerStop(context.Background(), id, nil); err != nil {
return err
}
if err := client.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{}); err != nil {
return err
}
for {
common.Logger.Printf(common.LOG_WAITING_STOP)
time.Sleep(500 * time.Millisecond)
stillRunning, err := containerRunningById(id)
if err != nil {
return err
}
if !stillRunning {
common.Logger.Printf(common.LOG_STOPPED)
done <- true
return nil
}
}
}
// StartContainer builds or updates a container based on the provided image name.
// Once built, the container will be started.
// The method will wait until the container is started and
// will notify it using the channel
func StartContainer(url string, imageName string, done chan bool, ef util.ExchangeFolder, a action.ActionID) (int, error) {
envVar := []string{}
envVar = append(envVar, util.StarterEnvVariableKey+"="+url)
envVar = append(envVar, util.StarterEnvNameVariableKey+"="+common.Flags.Descriptor.File)
envVar = append(envVar, util.StarterEnvLoginVariableKey+"="+common.Flags.Descriptor.Login)
envVar = append(envVar, util.StarterEnvPasswordVariableKey+"="+common.Flags.Descriptor.Password)
envVar = append(envVar, util.StarterVerbosityVariableKey+"="+strconv.Itoa(common.Flags.Logging.VerbosityLevel()))
envVar = append(envVar, util.ActionEnvVariableSkip+"="+strconv.Itoa(common.Flags.Skipping.SkippingLevel()))
envVar = append(envVar, util.ActionEnvVariableKey+"="+a.String())
envVar = append(envVar, "http_proxy="+common.Flags.Proxy.HTTP)
envVar = append(envVar, "https_proxy="+common.Flags.Proxy.HTTPS)
envVar = append(envVar, "no_proxy="+common.Flags.Proxy.Exclusions)
common.Logger.Printf(common.LOG_PASSING_CONTAINER_ENVARS, envVar)
// Check if we need to load parameters from the command line
if common.Flags.Descriptor.ParamFile != "" {
copyExtraParameters(common.Flags.Descriptor.ParamFile, ef)
}
startedAt := time.Now().UTC()
startedAt = startedAt.Add(time.Second * -2)
resp, err := client.ContainerCreate(context.Background(), &container.Config{
Image: imageName,
WorkingDir: util.InstallerVolume,
Env: envVar,
}, &container.HostConfig{
Mounts: []mount.Mount{
{
Type: mount.TypeBind,
Source: ef.Location.AdaptedPath(),
Target: util.InstallerVolume,
},
{
Type: mount.TypeBind,
Source: "/var/run/docker.sock",
Target: "/var/run/docker.sock",
},
},
}, nil, "")
if err != nil {
return 0, err
}
// Chan used to turn off the rolling log
stopLogReading := make(chan bool)
// Rolling output of the container logs
go func(start time.Time, exit chan bool) {
logMap := make(map[string]string)
// Trick to avoid tracing the same log line twice
notExist := func(s string) (bool, string) {
tab := strings.Split(s, util.InstallerLogPrefix)
if len(tab) > 1 {
sTrim := strings.Trim(tab[1], " ")
if _, ok := logMap[sTrim]; ok {
return false, ""
}
logMap[sTrim] = ""
return true, util.InstallerLogPrefix + sTrim
} else {
return true, s
}
}
// Request to get the logs content from the container
req := func(sr string) {
out, err := client.ContainerLogs(context.Background(), resp.ID, types.ContainerLogsOptions{Since: sr, ShowStdout: true, ShowStderr: true})
if err != nil {
stopLogReading <- true
}
s := bufio.NewScanner(out)
for s.Scan() {
str := s.Text()
if b, sTrim := notExist(str); b {
idx := strings.Index(sTrim, util.FeedbackPrefix)
if idx != -1 {
fU := util.FeedbackUpdate{}
err = json.Unmarshal([]byte(sTrim[idx+len(util.FeedbackPrefix):]), &fU)
if err != nil {
common.Logger.Println("Unable to parse progress update: " + err.Error())
} else if !common.Flags.Logging.ShouldOutputLogs() {
switch fU.Type {
case "I":
common.CliFeedbackNotifier.Info(fU.Message)
break
case "E":
common.CliFeedbackNotifier.Error(fU.Message)
break
case "P":
common.CliFeedbackNotifier.ProgressG(fU.Key, fU.Goal, fU.Message)
break
case "D":
common.CliFeedbackNotifier.Detail(fU.Message)
break
}
}
} else if common.Flags.Logging.ShouldOutputLogs() {
fmt.Println(sTrim)
}
}
}
err = out.Close()
if err != nil {
common.Logger.Println("Unable to close container log reader: " + err.Error())
}
}
Loop:
for {
select {
case <-exit:
// Last call to be sure to get the end of the logs content
now := time.Now()
now = now.Add(time.Second * -1)
sinceReq := strconv.FormatInt(now.Unix(), 10)
req(sinceReq)
break Loop
default:
// Running call to trace the container logs every 500ms
sinceReq := strconv.FormatInt(start.Unix(), 10)
start = start.Add(time.Millisecond * 500)
req(sinceReq)
time.Sleep(time.Millisecond * 500)
}
}
}(startedAt, stopLogReading)
defer func() {
if err := LogAllFromContainer(resp.ID, ef, done); err != nil {
common.Logger.Println("Unable to fetch logs from container")
}
}()
if err := client.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
common.CliFeedbackNotifier.Error("Unable to start container: %s", err.Error())
return 0, err
}
statusCh, errCh := client.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
stopLogReading <- true
return 0, err
case status := <-statusCh:
stopLogReading <- true
return int(status.StatusCode), nil
}
}
func LogAllFromContainer(id string, ef util.ExchangeFolder, done chan bool) error {
out, err := client.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
logFile, err := containerLog(ef)
if err != nil {
// we stop now (cannot fetch any more log)
done <- true
return err
}
defer log | {
containers, err := getContainers()
if err != nil {
return "", false, err
}
for _, c := range containers {
if c.Image == name || c.Image+":latest" == name {
return c.ID, true, nil
}
}
return "", false, nil
} | identifier_body |
quiz.js | // set answers into the quiz
var quizItems = modal.find("ol.quiz > li");
for (var prop in data) {
if( prop.startsWith("answer")) {
var n = prop.replace("answer",""); // get the answer number
log("answer number:", n, "quizItems", quizItems);
//var li = $(quizItems[n]);
//var li = quizItems.find("#" + n);
var li = hackyFind(quizItems, n);
log("li", li);
log("answer",n, prop, data[prop], li);
var input = li.find("input[type=text],select,textarea");
if( input.length > 0 ) {
input.val(data[prop]); // set answer on textboxes, selects and textareas
log("restored input", input, data[prop]);
} else {
var radios = li.find("input[type=radio]");
log("radios", radios);
radios.removeAttr("checked"); // clear any previously checked radio
var val = data[prop];
var radio = radios.filter("[value=" + val + "]");
log("radio val", val, radio);
radio.attr("checked", "true"); // set radio buttons
log("restored radio", radio);
}
}
}
modal.find("input[type=radio]").closest("ol").each(function(i,n) {
var ol = $(n);
ensureOneEmptyRadio(ol);
});
}
function hackyFind( arr, id) {
log("hackyFind id=", id, "array", arr);
var found = null;
$.each(arr, function(i,n) {
var node = $(n);
var nodeId = node.attr("id")
var answerClass = "answer" + id; // old way of identifying answers
if( nodeId === id || node.hasClass(answerClass) ) {
found = node;
}
});
return found;
}
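// Illustrative sketch (assumption, not from the original source): hackyFind
// resolves a question <li> either by its id (current markup) or by the legacy
// "answerN" class, so both generations of saved quizzes keep working:
//
//   <li id="f123456">...</li> matches hackyFind(quizItems, "f123456")
//   <li class="answer3">...</li> matches hackyFind(quizItems, "3")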
function prepareQuizForSave(form, data) {
log("prepareQuizForSave: build data object for quiz");
var quiz = form.find("#quizQuestions");
// Set names onto all inputs. Just number them answer0, answer1, etc. And add a class to the li with the name of the input, to use in front end validation
var questions = quiz.find("ol.quiz > li");
questions.each(function(q, n) { | setClass(li, "answer", id); // will remove old classes
li.data("type","input");
//setClass(li, "answer", q); // will remove old classes
li.find("input,select,textarea").each(function(inpNum, inp) {
$(inp).attr("name", "answer" + q);
});
});
if( data ) {
data.pageName = form.find("input[name=pageName]").val();
data.pageTitle = form.find("input[name=pageTitle]").val();
data.template = form.find("input[name=template]").val();
} else {
data = {
pageName: form.find("input[name=pageName]").val(),
pageTitle: form.find("input[name=pageTitle]").val(),
template: form.find("input[name=template]").val()
};
}
// Update the names of all inputs to be the class on the question li
var inputs = quiz.find("input,select,textarea").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("update input names", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.closest("li[id]").attr("class"); // the question li is the closest one with an id. question name is its class
inp.attr("name", name);
log("set name", name, inp);
});
// Find all inputs and add them to the data object
var inputs = quiz.find("input[type=text],select,textarea,input[type=radio]:checked").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("add inputs", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.attr("name");
var val = inp.val();
data[name] = val;
log("set data att", name, val);
});
// remove any "temp" elements that have been added as part of editing
form.find(".temp").remove();
quiz.find("input[type=radio]").removeAttr("checked");
removeEmptyRadios(quiz.find("ol ol"));
data.body = quiz.html();
log("html", data.body);
return data;
}
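// Hypothetical shape of the returned object (assumption): for a quiz holding one
// textbox question whose li has id "f123456", prepareQuizForSave(form) yields
// something along these lines:
//
//   {
//     pageName: 'my-quiz', pageTitle: 'My quiz', template: 'quiz',
//     answerf123456: 'free text typed by the user',
//     body: '<ol class="quiz">...</ol>' // the cleaned quiz markup
//   }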
function initQuizBuilder() {
var form = $("#quizQuestions");
log("form", form);
form.on("click", "h3,p,label", function(e){
var target = $(e.target);
log("editable item clicked", target);
e.stopPropagation();
e.preventDefault();
var inp = $("<input class='" + target[0].tagName + "' type='text'/>");
log("created inp", inp);
var txt = target.text();
if( txt.startsWith("[")) { // placeholder text
txt = "";
}
inp.val(txt);
inp.insertAfter(target);
target.detach();
log("detached target", target);
inp.focus();
inp.focusout(function() {
// put back original element
var newText = inp.val().trim();
log("focusout", inp, target, "newText", newText);
target.text(inp.val());
// If it's a label and it's empty, then remove it
if( target.hasClass("LABEL") && newText.length == 0 ) {
inp.closest("li").remove();
} else {
target.insertAfter(inp);
inp.remove();
}
if( target.is("label")) {
ensureOneEmptyRadio(target.closest("ol"));
}
});
});
form.on("keyup", "input.radioLabel", function(e){
var inp = $(e.target);
var li = inp.closest("li");
var ul = li.closest("ul");
var last = ul.find("li").filter(":last");
log("last", li, last, li==last);
if( li.is(last) ) {
addRadioToMulti(ul);
}
});
// Suppress enter key to prevent users from submitting, and closing, the modal edit window accidentally
form.on("keypress", "input", function(e) {
if( e.which == 13 ) {
e.preventDefault();
e.stopPropagation();
$(e.target).focusout();
}
});
// insert a delete X on hover of a question li
form.on("mouseenter", "ol.quiz > li", function(e) {
var li = $(e.target).closest("li");
if( li.find("span.delete").length > 0 ) {
return;
}
var span = $("<span class='delete temp'></span>");
li.prepend(span);
li.mouseleave(function() {
li.find("span.delete").remove();
});
});
form.on("click", "span.delete", function(e) {
var li = $(e.target).closest("li");
li.remove();
});
$("select.newQuestionType").click(function() {
var $this = $(this);
log("new question", $this);
var type = $this.val();
$this.val("");
if( type && type.length > 0 ) {
addQuestion(type);
}
});
function addQuestion(type) {
log("add question", type);
if( type === "textbox") {
addQuestionTextbox();
} else if( type === "multi") {
addQuestionMulti();
}
}
function addQuestionTextbox() {
var questions = form.find("ol.quiz");
log("addQuestionTextbox", questions);
var li = createQuestionLi(questions);
li.append($("<textarea class='wide autoresize' cols='50' rows='1'></textarea>"));
}
function addQuestionMulti() {
var questions = form.find("ol.quiz");
log("addQuestionMulti", questions);
var li = createQuestionLi(questions);
var olRadios = $("<ol></ol>");
olRadios.attr("id", "answers_" + li.attr("id"));
li.append(olRadios);
addRadioToMulti(olRadios);
}
function createQuestionLi(form) {
var id = Math.floor(Math.random()*1000000);
var li = $(
"<li id='f" + id + "'>" +
"<h3>[Enter the question here]</h3>" +
"<p>[Enter help text here]</p>" +
"</li>"
);
form.append(li);
return li;
}
}
function removeEmptyRadios(ol) {
ol.find("li").each(function(i, n) {
var li = $(n);
var txt = li.find("label").text().trim();
if( txt == "" || txt.startsWith("[")) {
li.remove();
}
});
}
function ensureOneEmptyRadio(ol) {
// remove any li's containing empty labels, then add one empty one
removeEmptyRadios(ol);
addRadioToMulti(ol);
}
function addRadioToMulti(ol) {
var question = | var li = $(n);
var id = li.attr("id"); | random_line_split |
quiz.js | // set answers into the quiz
var quizItems = modal.find("ol.quiz > li");
for (var prop in data) {
if( prop.startsWith("answer")) {
var n = prop.replace("answer",""); // get the answer number
log("answer number:", n, "quizItems", quizItems);
//var li = $(quizItems[n]);
//var li = quizItems.find("#" + n);
var li = hackyFind(quizItems, n);
log("li", li);
log("answer",n, prop, data[prop], li);
var input = li.find("input[type=text],select,textarea");
if( input.length > 0 ) {
input.val(data[prop]); // set answer on textboxes, selects and textareas
log("restored input", input, data[prop]);
} else {
var radios = li.find("input[type=radio]");
log("radios", radios);
radios.removeAttr("checked"); // clear any previously checked radio
var val = data[prop];
var radio = radios.filter("[value=" + val + "]");
log("radio val", val, radio);
radio.attr("checked", "true"); // set radio buttons
log("restored radio", radio);
}
}
}
modal.find("input[type=radio]").closest("ol").each(function(i,n) {
var ol = $(n);
ensureOneEmptyRadio(ol);
});
}
function hackyFind( arr, id) {
log("hackyFind id=", id, "array", arr);
var found = null;
$.each(arr, function(i,n) {
var node = $(n);
var nodeId = node.attr("id")
var answerClass = "answer" + id; // old way of identifying answers
if( nodeId === id || node.hasClass(answerClass) ) {
found = node;
}
});
return found;
}
function prepareQuizForSave(form, data) {
log("prepareQuizForSave: build data object for quiz");
var quiz = form.find("#quizQuestions");
// Set names onto all inputs. Just number them answer0, answer1, etc. And add a class to the li with the name of the input, to use in front end validation
var questions = quiz.find("ol.quiz > li");
questions.each(function(q, n) {
var li = $(n);
var id = li.attr("id");
setClass(li, "answer", id); // will remove old classes
li.data("type","input");
//setClass(li, "answer", q); // will remove old classes
li.find("input,select,textarea").each(function(inpNum, inp) {
$(inp).attr("name", "answer" + q);
});
});
if( data ) {
data.pageName = form.find("input[name=pageName]").val();
data.pageTitle = form.find("input[name=pageTitle]").val();
data.template = form.find("input[name=template]").val();
} else {
data = {
pageName: form.find("input[name=pageName]").val(),
pageTitle: form.find("input[name=pageTitle]").val(),
template: form.find("input[name=template]").val()
};
}
// Update the names of all inputs to be the class on the question li
var inputs = quiz.find("input,select,textarea").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("update input names", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.closest("li[id]").attr("class"); // the question li is the closest one with an id. question name is its class
inp.attr("name", name);
log("set name", name, inp);
});
// Find all inputs and add them to the data object
var inputs = quiz.find("input[type=text],select,textarea,input[type=radio]:checked").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("add inputs", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.attr("name");
var val = inp.val();
data[name] = val;
log("set data att", name, val);
});
// remove any "temp" elements that have been added as part of editing
form.find(".temp").remove();
quiz.find("input[type=radio]").removeAttr("checked");
removeEmptyRadios(quiz.find("ol ol"));
data.body = quiz.html();
log("html", data.body);
return data;
}
function initQuizBuilder() {
var form = $("#quizQuestions");
log("form", form);
form.on("click", "h3,p,label", function(e){
var target = $(e.target);
log("editable item clicked", target);
e.stopPropagation();
e.preventDefault();
var inp = $("<input class='" + target[0].tagName + "' type='text'/>");
log("created inp", inp);
var txt = target.text();
if( txt.startsWith("[")) { // placeholder text
txt = "";
}
inp.val(txt);
inp.insertAfter(target);
target.detach();
log("detached target", target);
inp.focus();
inp.focusout(function() {
// put back original element
var newText = inp.val().trim();
log("focusout", inp, target, "newText", newText);
target.text(inp.val());
// If it's a label and it's empty, then remove it
if( target.hasClass("LABEL") && newText.length == 0 ) {
inp.closest("li").remove();
} else {
target.insertAfter(inp);
inp.remove();
}
if( target.is("label")) {
ensureOneEmptyRadio(target.closest("ol"));
}
});
});
form.on("keyup", "input.radioLabel", function(e){
var inp = $(e.target);
var li = inp.closest("li");
var ul = li.closest("ul");
var last = ul.find("li").filter(":last");
log("last", li, last, li==last);
if( li.is(last) ) {
addRadioToMulti(ul);
}
});
// Suppress enter key to prevent users from submitting, and closing, the modal edit window accidentally
form.on("keypress", "input", function(e) {
if( e.which == 13 ) {
e.preventDefault();
e.stopPropagation();
$(e.target).focusout();
}
});
// insert a delete X on hover of a question li
form.on("mouseenter", "ol.quiz > li", function(e) {
var li = $(e.target).closest("li");
if( li.find("span.delete").length > 0 ) {
return;
}
var span = $("<span class='delete temp'></span>");
li.prepend(span);
li.mouseleave(function() {
li.find("span.delete").remove();
});
});
form.on("click", "span.delete", function(e) {
var li = $(e.target).closest("li");
li.remove();
});
$("select.newQuestionType").click(function() {
var $this = $(this);
log("new question", $this);
var type = $this.val();
$this.val("");
if( type && type.length > 0 ) {
addQuestion(type);
}
});
function addQuestion(type) {
log("add question", type);
if( type === "textbox") {
addQuestionTextbox();
} else if( type === "multi") {
addQuestionMulti();
}
}
function addQuestionTextbox() {
var questions = form.find("ol.quiz");
log("addQuestionTextbox", questions);
var li = createQuestionLi(questions);
li.append($("<textarea class='wide autoresize' cols='50' rows='1'></textarea>"));
}
function addQuestionMulti() {
var questions = form.find("ol.quiz");
log("addQuestionMulti", questions);
var li = createQuestionLi(questions);
var olRadios = $("<ol></ol>");
olRadios.attr("id", "answers_" + li.attr("id"));
li.append(olRadios);
addRadioToMulti(olRadios);
}
function createQuestionLi(form) {
var id = Math.floor(Math.random()*1000000);
var li = $(
"<li id='f" + id + "'>" +
"<h3>[Enter the question here]</h3>" +
"<p>[Enter help text here]</p>" +
"</li>"
);
form.append(li);
return li;
}
}
function removeEmptyRadios(ol) |
function ensureOneEmptyRadio(ol) {
// remove any li's containing empty labels, then add one empty one
removeEmptyRadios(ol);
addRadioToMulti(ol);
}
function addRadioToMulti(ol) {
var | {
ol.find("li").each(function(i, n) {
var li = $(n);
var txt = li.find("label").text().trim();
if( txt == "" || txt.startsWith("[")) {
li.remove();
}
});
} | identifier_body |
quiz.js | // set answers into the quiz
var quizItems = modal.find("ol.quiz > li");
for (var prop in data) {
if( prop.startsWith("answer")) {
var n = prop.replace("answer",""); // get the answer number
log("answer number:", n, "quizItems", quizItems);
//var li = $(quizItems[n]);
//var li = quizItems.find("#" + n);
var li = hackyFind(quizItems, n);
log("li", li);
log("answer",n, prop, data[prop], li);
var input = li.find("input[type=text],select,textarea");
if( input.length > 0 ) {
input.val(data[prop]); // set answer on textboxes, selects and textareas
log("restored input", input, data[prop]);
} else {
var radios = li.find("input[type=radio]");
log("radios", radios);
radios.removeAttr("checked"); // clear any previously checked radio
var val = data[prop];
var radio = radios.filter("[value=" + val + "]");
log("radio val", val, radio);
radio.attr("checked", "true"); // set radio buttons
log("restored radio", radio);
}
}
}
modal.find("input[type=radio]").closest("ol").each(function(i,n) {
var ol = $(n);
ensureOneEmptyRadio(ol);
});
}
function hackyFind( arr, id) {
log("hackyFind id=", id, "array", arr);
var found = null;
$.each(arr, function(i,n) {
var node = $(n);
var nodeId = node.attr("id")
var answerClass = "answer" + id; // old way of identifying answers
if( nodeId === id || node.hasClass(answerClass) ) {
found = node;
}
});
return found;
}
function | (form, data) {
log("prepareQuizForSave: build data object for quiz");
var quiz = form.find("#quizQuestions");
// Set names onto all inputs. Just number them answer0, answer1, etc. And add a class to the li with the name of the input, to use in front end validation
var questions = quiz.find("ol.quiz > li");
questions.each(function(q, n) {
var li = $(n);
var id = li.attr("id");
setClass(li, "answer", id); // will remove old classes
li.data("type","input");
//setClass(li, "answer", q); // will remove old classes
li.find("input,select,textarea").each(function(inpNum, inp) {
$(inp).attr("name", "answer" + q);
});
});
if( data ) {
data.pageName = form.find("input[name=pageName]").val();
data.pageTitle = form.find("input[name=pageTitle]").val();
data.template = form.find("input[name=template]").val();
} else {
data = {
pageName: form.find("input[name=pageName]").val(),
pageTitle: form.find("input[name=pageTitle]").val(),
template: form.find("input[name=template]").val()
};
}
// Update the names of all inputs to be the class on the question li
var inputs = quiz.find("input,select,textarea").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("update input names", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.closest("li[id]").attr("class"); // the question li is the closest one with an id. question name is its class
inp.attr("name", name);
log("set name", name, inp);
});
// Find all inputs and add them to the data object
var inputs = quiz.find("input[type=text],select,textarea,input[type=radio]:checked").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("add inputs", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.attr("name");
var val = inp.val();
data[name] = val;
log("set data att", name, val);
});
// remove any "temp" elements that have been added as part of editing
form.find(".temp").remove();
quiz.find("input[type=radio]").removeAttr("checked");
removeEmptyRadios(quiz.find("ol ol"));
data.body = quiz.html();
log("html", data.body);
return data;
}
function initQuizBuilder() {
var form = $("#quizQuestions");
log("form", form);
form.on("click", "h3,p,label", function(e){
var target = $(e.target);
log("editable item clicked", target);
e.stopPropagation();
e.preventDefault();
var inp = $("<input class='" + target[0].tagName + "' type='text'/>");
log("created inp", inp);
var txt = target.text();
if( txt.startsWith("[")) { // placeholder text
txt = "";
}
inp.val(txt);
inp.insertAfter(target);
target.detach();
log("detached target", target);
inp.focus();
inp.focusout(function() {
// put back original element
var newText = inp.val().trim();
log("focusout", inp, target, "newText", newText);
target.text(inp.val());
// If it's a label and it's empty, then remove it
if( target.hasClass("LABEL") && newText.length == 0 ) {
inp.closest("li").remove();
} else {
target.insertAfter(inp);
inp.remove();
}
if( target.is("label")) {
ensureOneEmptyRadio(target.closest("ol"));
}
});
});
form.on("keyup", "input.radioLabel", function(e){
var inp = $(e.target);
var li = inp.closest("li");
var ul = li.closest("ul");
var last = ul.find("li").filter(":last");
log("last", li, last, li==last);
if( li.is(last) ) {
addRadioToMulti(ul);
}
});
// Suppress enter key to prevent users from submitting, and closing, the modal edit window accidentally
form.on("keypress", "input", function(e) {
if( e.which == 13 ) {
e.preventDefault();
e.stopPropagation();
$(e.target).focusout();
}
});
// insert a delete X on hover of a question li
form.on("mouseenter", "ol.quiz > li", function(e) {
var li = $(e.target).closest("li");
if( li.find("span.delete").length > 0 ) {
return;
}
var span = $("<span class='delete temp'></span>");
li.prepend(span);
li.mouseleave(function() {
li.find("span.delete").remove();
});
});
form.on("click", "span.delete", function(e) {
var li = $(e.target).closest("li");
li.remove();
});
$("select.newQuestionType").click(function() {
var $this = $(this);
log("new question", $this);
var type = $this.val();
$this.val("");
if( type && type.length > 0 ) {
addQuestion(type);
}
});
function addQuestion(type) {
log("add question", type);
if( type === "textbox") {
addQuestionTextbox();
} else if( type === "multi") {
addQuestionMulti();
}
}
function addQuestionTextbox() {
var questions = form.find("ol.quiz");
log("addQuestionTextbox", questions);
var li = createQuestionLi(questions);
li.append($("<textarea class='wide autoresize' cols='50' rows='1'></textarea>"));
}
function addQuestionMulti() {
var questions = form.find("ol.quiz");
log("addQuestionMulti", questions);
var li = createQuestionLi(questions);
var olRadios = $("<ol></ol>");
olRadios.attr("id", "answers_" + li.attr("id"));
li.append(olRadios);
addRadioToMulti(olRadios);
}
function createQuestionLi(form) {
var id = Math.floor(Math.random()*1000000);
var li = $(
"<li id='f" + id + "'>" +
"<h3>[Enter the question here]</h3>" +
"<p>[Enter help text here]</p>" +
"</li>"
);
form.append(li);
return li;
}
}
function removeEmptyRadios(ol) {
ol.find("li").each(function(i, n) {
var li = $(n);
var txt = li.find("label").text().trim();
if( txt == "" || txt.startsWith("[")) {
li.remove();
}
});
}
function ensureOneEmptyRadio(ol) {
// remove any li's containing empty labels, then add one empty one
removeEmptyRadios(ol);
addRadioToMulti(ol);
}
function addRadioToMulti(ol) {
var question | prepareQuizForSave | identifier_name |
quiz.js | radios.filter("[value=" + val + "]");
log("radio val", val, radio);
radio.attr("checked", "true"); // set radio buttons
log("restored radio", radio);
}
}
}
modal.find("input[type=radio]").closest("ol").each(function(i,n) {
var ol = $(n);
ensureOneEmptyRadio(ol);
});
}
function hackyFind( arr, id) {
log("hackyFind id=", id, "array", arr);
var found = null;
$.each(arr, function(i,n) {
var node = $(n);
var nodeId = node.attr("id")
var answerClass = "answer" + id; // old way of identifying answers
if( nodeId === id || node.hasClass(answerClass) ) {
found = node;
}
});
return found;
}
function prepareQuizForSave(form, data) {
log("prepareQuizForSave: build data object for quiz");
var quiz = form.find("#quizQuestions");
// Set names onto all inputs. Just number them answer0, answer1, etc. And add a class to the li with the name of the input, to use in front end validation
var questions = quiz.find("ol.quiz > li");
questions.each(function(q, n) {
var li = $(n);
var id = li.attr("id");
setClass(li, "answer", id); // will remove old classes
li.data("type","input");
//setClass(li, "answer", q); // will remove old classes
li.find("input,select,textarea").each(function(inpNum, inp) {
$(inp).attr("name", "answer" + q);
});
});
if( data ) {
data.pageName = form.find("input[name=pageName]").val();
data.pageTitle = form.find("input[name=pageTitle]").val();
data.template = form.find("input[name=template]").val();
} else {
data = {
pageName: form.find("input[name=pageName]").val(),
pageTitle: form.find("input[name=pageTitle]").val(),
template: form.find("input[name=template]").val()
};
}
// Update the names of all inputs to be the class on the question li
var inputs = quiz.find("input,select,textarea").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("update input names", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.closest("li[id]").attr("class"); // the question li is the closest one with an id. question name is its class
inp.attr("name", name);
log("set name", name, inp);
});
// Find all inputs and add them to the data object
var inputs = quiz.find("input[type=text],select,textarea,input[type=radio]:checked").not(".newQuestionType,input[name=pageTitle],input[name=pageName]");
log("add inputs", inputs);
inputs.each(function(i,n){
var inp = $(n);
var name = inp.attr("name");
var val = inp.val();
data[name] = val;
log("set data att", name, val);
});
// remove any "temp" elements that have been added as part of editing
form.find(".temp").remove();
quiz.find("input[type=radio]").removeAttr("checked");
removeEmptyRadios(quiz.find("ol ol"));
data.body = quiz.html();
log("html", data.body);
return data;
}
function initQuizBuilder() {
var form = $("#quizQuestions");
log("form", form);
form.on("click", "h3,p,label", function(e){
var target = $(e.target);
log("editable item clicked", target);
e.stopPropagation();
e.preventDefault();
var inp = $("<input class='" + target[0].tagName + "' type='text'/>");
log("created inp", inp);
var txt = target.text();
if( txt.startsWith("[")) { // placeholder text
txt = "";
}
inp.val(txt);
inp.insertAfter(target);
target.detach();
log("detached target", target);
inp.focus();
inp.focusout(function() {
// put back original element
var newText = inp.val().trim();
log("focusout", inp, target, "newText", newText);
target.text(inp.val());
// If it's a label and it's empty, then remove it
if( target.hasClass("LABEL") && newText.length == 0 ) {
inp.closest("li").remove();
} else {
target.insertAfter(inp);
inp.remove();
}
if( target.is("label")) {
ensureOneEmptyRadio(target.closest("ol"));
}
});
});
form.on("keyup", "input.radioLabel", function(e){
var inp = $(e.target);
var li = inp.closest("li");
var ul = li.closest("ul");
var last = ul.find("li").filter(":last");
log("last", li, last, li==last);
if( li.is(last) ) {
addRadioToMulti(ul);
}
});
// Suppress enter key to prevent users from submitting, and closing, the modal edit window accidentally
form.on("keypress", "input", function(e) {
if( e.which == 13 ) {
e.preventDefault();
e.stopPropagation();
$(e.target).focusout();
}
});
// insert a delete X on hover of a question li
form.on("mouseenter", "ol.quiz > li", function(e) {
var li = $(e.target).closest("li");
if( li.find("span.delete").length > 0 ) {
return;
}
var span = $("<span class='delete temp'></span>");
li.prepend(span);
li.mouseleave(function() {
li.find("span.delete").remove();
});
});
form.on("click", "span.delete", function(e) {
var li = $(e.target).closest("li");
li.remove();
});
$("select.newQuestionType").click(function() {
var $this = $(this);
log("new question", $this);
var type = $this.val();
$this.val("");
if( type && type.length > 0 ) {
addQuestion(type);
}
});
function addQuestion(type) {
log("add question", type);
if( type === "textbox") {
addQuestionTextbox();
} else if( type === "multi") {
addQuestionMulti();
}
}
function addQuestionTextbox() {
var questions = form.find("ol.quiz");
log("addQuestionTextbox", questions);
var li = createQuestionLi(questions);
li.append($("<textarea class='wide autoresize' cols='50' rows='1'></textarea>"));
}
function addQuestionMulti() {
var questions = form.find("ol.quiz");
log("addQuestionMulti", questions);
var li = createQuestionLi(questions);
var olRadios = $("<ol></ol>");
olRadios.attr("id", "answers_" + li.attr("id"));
li.append(olRadios);
addRadioToMulti(olRadios);
}
function createQuestionLi(form) {
var id = Math.floor(Math.random()*1000000);
var li = $(
"<li id='f" + id + "'>" +
"<h3>[Enter the question here]</h3>" +
"<p>[Enter help text here]</p>" +
"</li>"
);
form.append(li);
return li;
}
}
function removeEmptyRadios(ol) {
ol.find("li").each(function(i, n) {
var li = $(n);
var txt = li.find("label").text().trim();
if( txt == "" || txt.startsWith("[")) {
li.remove();
}
});
}
function ensureOneEmptyRadio(ol) {
// remove any li's containing empty labels, then add one empty one
removeEmptyRadios(ol);
addRadioToMulti(ol);
}
function addRadioToMulti(ol) {
var question = ol.closest("li").attr("class");
log("addRadioToMulti", ol, question);
var answerId = Math.floor(Math.random()*1000000);
var li = $("<li></li>");
li.append($("<input type='radio' id='answer_" + answerId +"' value='" + answerId + "'/>"));
li.append($("<label for='answer_" + answerId + "'>[Enter answer text here]</label>"));
li.find("input").attr("name", question); // make the name of all radios the question
ol.append(li);
}
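// Illustrative sketch (assumption): for a question li classed "answerf123456",
// addRadioToMulti appends markup along these lines, with a random answer id:
//
//   <li>
//     <input type="radio" id="answer_42" name="answerf123456" value="42"/>
//     <label for="answer_42">[Enter answer text here]</label>
//   </li>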
/**
* Remove all other answer classes, and and the new one
*/
function setClass(element, prefix, suffix) {
var el = $(element);
log("setClass", el, el.attr("class"));
var classes = el.attr("class");
if( classes ) | {
$.each(classes.split(" "), function(i, n) {
if( n.startsWith(prefix)) {
el.removeClass(n);
}
});
} | conditional_block |
|
PaymentChannelsClient.js | (address) {
const { provider } = Engine.context.NetworkController.state;
this.selectedAddress = address;
this.state = {
ready: false,
provider,
hubUrl: null,
tokenAddress: null,
contractAddress: null,
hubWalletAddress: null,
ethprovider: null,
tokenContract: null,
connext: null,
channelManagerAddress: null,
ethChainId: null,
authorized: false,
address: null,
channelState: null,
connextState: null,
persistent: null,
runtime: null,
exchangeRate: 0,
sendAmount: '',
sendRecipient: '',
depositAmount: '',
status: {
txHash: '',
type: '',
reset: false
},
depositPending: false,
withdrawalPending: false,
withdrawalPendingValue: undefined,
blocked: false,
transactions: [],
swapPending: false
};
}
setState = data => {
Object.keys(data).forEach(key => {
this.state[key] = data[key];
});
};
async setConnext(provider) {
const { type } = provider;
const infuraProvider = createInfuraProvider({ network: type });
let hubUrl;
const ethprovider = new EthQuery(infuraProvider);
switch (type) {
case 'rinkeby':
hubUrl = `${PUBLIC_URL}/api/rinkeby/hub`;
break;
case 'mainnet':
hubUrl = `${PUBLIC_URL}/api/mainnet/hub`;
break;
default:
throw new Error(`Unrecognized network: ${type}`);
}
const { KeyringController, TransactionController } = Engine.context;
const opts = {
hubUrl,
externalWallet: {
external: true,
address: this.selectedAddress,
getAddress: () => Promise.resolve(this.selectedAddress),
signMessage: message => {
const hexMessage = byteArrayToHex(message);
return KeyringController.signPersonalMessage({ data: hexMessage, from: this.selectedAddress });
},
sign: async txMeta => {
// We have to normalize the values
delete txMeta.gas;
delete txMeta.gasPrice;
const weiValue = txMeta.value.toString();
const bnValue = toBN(weiValue);
const normalizedTxMeta = {
...txMeta,
value: BNToHex(bnValue),
silent: true
};
try {
const signedTx = await TransactionController.addTransaction(normalizedTxMeta);
const hash = await signedTx.result;
return new Promise(resolve => {
TransactionController.hub.on(`${signedTx.transactionMeta.id}:finished`, async () => {
TransactionController.hub.removeAllListeners(`${signedTx.transactionMeta.id}:finished`);
});
TransactionController.hub.on(`${signedTx.transactionMeta.id}:confirmed`, async () => {
TransactionController.hub.removeAllListeners(
`${signedTx.transactionMeta.id}:confirmed`
);
setTimeout(() => {
TransactionsNotificationManager.showInstantPaymentNotification('pending_deposit');
}, 1000);
resolve({
hash,
wait: () => Promise.resolve(1)
});
});
});
} catch (e) {
if (!this.state.blocked) {
this.setState({ blocked: true });
setTimeout(() => {
this.setState({ blocked: false });
}, 60 * BLOCKED_DEPOSIT_DURATION_MINUTES * 1000);
}
Logger.error('ExternalWallet::sign', e);
throw e;
}
}
},
web3Provider: Engine.context.NetworkController.provider
};
// *** Instantiate the connext client ***
try {
Logger.log('PC::createClient about to call');
const connext = await Connext.createClient(opts);
Logger.log('PC::createClient success');
this.setState({
connext,
tokenAddress: connext.opts.tokenAddress,
channelManagerAddress: connext.opts.contractAddress,
hubWalletAddress: connext.opts.hubAddress,
ethChainId: connext.opts.ethChainId,
ethprovider
});
} catch (e) {
this.logCurrentState('PC::createClient');
Logger.error('PC::createClient', e);
throw e;
}
}
getBalance = () => {
const amount = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0';
const ret = parseFloat(renderFromWei(amount, 18));
if (ret === 0) {
return '0.00';
}
return ret.toFixed(2).toString();
};
async pollConnextState() {
Logger.log('PC::createClient success');
const { connext } = this.state;
// start polling
try {
Logger.log('PC::pollConnextState connext.start');
await connext.start();
Logger.log('PC::pollConnextState connext.start succesful');
} catch (e) {
this.logCurrentState('PC::start');
Logger.error('PC::start', e);
}
// register connext listeners
connext.on('onStateChange', async state => {
try {
this.checkForBalanceChange(state);
this.setState({
ready: true,
channelState: state.persistent.channel,
connextState: state,
runtime: state.runtime,
exchangeRate: state.runtime.exchangeRate ? state.runtime.exchangeRate.rates.DAI : 0
});
this.checkStatus();
const transactions = await this.state.connext.getPaymentHistory();
hub.emit('state::change', {
balance: this.getBalance(),
status: this.state.status,
transactions,
ready: true
});
if (state.runtime.channelStatus === 'CS_CHAINSAW_ERROR') {
hub.emit('state::cs_chainsaw_error', { channelState: state.persistent.channel });
}
} catch (e) {
this.logCurrentState('PC::onStateChange');
Logger.error('PC::onStateChange', e);
}
});
}
checkPaymentHistory = async () => {
const paymentHistory = await this.state.connext.getPaymentHistory();
const lastKnownPaymentIDStr = await AsyncStorage.getItem('@MetaMask:lastKnownInstantPaymentID');
let lastKnownPaymentID = 0;
const latestPayment = paymentHistory.find(
payment => payment.recipient.toLowerCase() === this.selectedAddress.toLowerCase()
);
if (latestPayment) {
const latestPaymentID = parseInt(latestPayment.id, 10);
if (lastKnownPaymentIDStr) {
lastKnownPaymentID = parseInt(lastKnownPaymentIDStr, 10);
if (lastKnownPaymentID < latestPaymentID) {
const amountToken = renderFromWei(latestPayment.amount.amountToken);
setTimeout(() => {
TransactionsNotificationManager.showIncomingPaymentNotification(amountToken);
}, 300);
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
} else {
// For first time flow
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
}
this.setState({ transactions: paymentHistory });
};
pollAndSwap = async () => {
try {
await this.autoSwap();
} catch (e) {
this.logCurrentState('PC::autoswap');
Logger.error('PC::autoswap', e);
this.setState({ swapPending: false });
}
this.autoswapHandler = setTimeout(() => {
this.pollAndSwap();
}, 1000);
};
async autoSwap() {
const { channelState, connextState, swapPending } = this.state;
if (!connextState || hasPendingOps(channelState) || swapPending) {
!swapPending && this.logCurrentState('PC::autoswap::exception');
return;
}
const weiBalance = toBN(channelState.balanceWeiUser);
const tokenBalance = toBN(channelState.balanceTokenUser);
if (channelState && weiBalance.gt(toBN('0')) && tokenBalance.lte(HUB_EXCHANGE_CEILING)) {
this.setState({ swapPending: true });
Logger.log('PC::pollAndSwap autoSwap exchanging');
await this.state.connext.exchange(channelState.balanceWeiUser, 'wei');
Logger.log('PC::pollAndSwap autoSwap exchanging succesful');
this.setState({ swapPending: false });
}
}
checkForBalanceChange = async newState => {
// Check for balance changes
const prevBalance = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0';
const currentBalance =
(newState && newState.persistent.channel && newState.persistent.channel.balanceTokenUser) || '0';
if (toBN(prevBalance).lt(toBN(currentBalance))) {
this.checkPaymentHistory();
}
};
handleInternalTransactions = txHash => {
const { withdrawalPendingValue } = this.state;
const networkID = Networks[Engine.context.NetworkController.state.provider.type].networkId.toString();
const newInternalTxs = Engine.context.TransactionController.state.internalTransactions | constructor | identifier_name |
|
PaymentChannelsClient.js | break;
default:
throw new Error(`Unrecognized network: ${type}`);
}
const { KeyringController, TransactionController } = Engine.context;
const opts = {
hubUrl,
externalWallet: {
external: true,
address: this.selectedAddress,
getAddress: () => Promise.resolve(this.selectedAddress),
signMessage: message => {
const hexMessage = byteArrayToHex(message);
return KeyringController.signPersonalMessage({ data: hexMessage, from: this.selectedAddress });
},
sign: async txMeta => {
// We have to normalize the values
delete txMeta.gas;
delete txMeta.gasPrice;
const weiValue = txMeta.value.toString();
const bnValue = toBN(weiValue);
const normalizedTxMeta = {
...txMeta,
value: BNToHex(bnValue),
silent: true
};
try {
const signedTx = await TransactionController.addTransaction(normalizedTxMeta);
const hash = await signedTx.result;
return new Promise(resolve => {
TransactionController.hub.on(`${signedTx.transactionMeta.id}:finished`, async () => {
TransactionController.hub.removeAllListeners(`${signedTx.transactionMeta.id}:finished`);
});
TransactionController.hub.on(`${signedTx.transactionMeta.id}:confirmed`, async () => {
TransactionController.hub.removeAllListeners(
`${signedTx.transactionMeta.id}:confirmed`
);
setTimeout(() => {
TransactionsNotificationManager.showInstantPaymentNotification('pending_deposit');
}, 1000);
resolve({
hash,
wait: () => Promise.resolve(1)
});
});
});
} catch (e) {
if (!this.state.blocked) {
this.setState({ blocked: true });
setTimeout(() => {
this.setState({ blocked: false });
}, 60 * BLOCKED_DEPOSIT_DURATION_MINUTES * 1000);
}
Logger.error('ExternalWallet::sign', e);
throw e;
}
}
},
web3Provider: Engine.context.NetworkController.provider
};
// *** Instantiate the connext client ***
try {
Logger.log('PC::createClient about to call');
const connext = await Connext.createClient(opts);
Logger.log('PC::createClient success');
this.setState({
connext,
tokenAddress: connext.opts.tokenAddress,
channelManagerAddress: connext.opts.contractAddress,
hubWalletAddress: connext.opts.hubAddress,
ethChainId: connext.opts.ethChainId,
ethprovider
});
} catch (e) {
this.logCurrentState('PC::createClient');
Logger.error('PC::createClient', e);
throw e;
}
}
getBalance = () => {
const amount = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0';
const ret = parseFloat(renderFromWei(amount, 18));
if (ret === 0) {
return '0.00';
}
return ret.toFixed(2).toString();
};
async pollConnextState() {
Logger.log('PC::createClient success');
const { connext } = this.state;
// start polling
try {
Logger.log('PC::pollConnextState connext.start');
await connext.start();
Logger.log('PC::pollConnextState connext.start succesful');
} catch (e) {
this.logCurrentState('PC::start');
Logger.error('PC::start', e);
}
// register connext listeners
connext.on('onStateChange', async state => {
try {
this.checkForBalanceChange(state);
this.setState({
ready: true,
channelState: state.persistent.channel,
connextState: state,
runtime: state.runtime,
exchangeRate: state.runtime.exchangeRate ? state.runtime.exchangeRate.rates.DAI : 0
});
this.checkStatus();
const transactions = await this.state.connext.getPaymentHistory();
hub.emit('state::change', {
balance: this.getBalance(),
status: this.state.status,
transactions,
ready: true
});
if (state.runtime.channelStatus === 'CS_CHAINSAW_ERROR') {
hub.emit('state::cs_chainsaw_error', { channelState: state.persistent.channel });
}
} catch (e) {
this.logCurrentState('PC::onStateChange');
Logger.error('PC::onStateChange', e);
}
});
}
checkPaymentHistory = async () => {
const paymentHistory = await this.state.connext.getPaymentHistory();
const lastKnownPaymentIDStr = await AsyncStorage.getItem('@MetaMask:lastKnownInstantPaymentID');
let lastKnownPaymentID = 0;
const latestPayment = paymentHistory.find(
payment => payment.recipient.toLowerCase() === this.selectedAddress.toLowerCase()
);
if (latestPayment) {
const latestPaymentID = parseInt(latestPayment.id, 10);
if (lastKnownPaymentIDStr) {
lastKnownPaymentID = parseInt(lastKnownPaymentIDStr, 10);
if (lastKnownPaymentID < latestPaymentID) {
const amountToken = renderFromWei(latestPayment.amount.amountToken);
setTimeout(() => {
TransactionsNotificationManager.showIncomingPaymentNotification(amountToken);
}, 300);
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
} else {
// For first time flow
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
}
this.setState({ transactions: paymentHistory });
};
pollAndSwap = async () => {
try {
await this.autoSwap();
} catch (e) {
this.logCurrentState('PC::autoswap');
Logger.error('PC::autoswap', e);
this.setState({ swapPending: false });
}
this.autoswapHandler = setTimeout(() => {
this.pollAndSwap();
}, 1000);
};
async autoSwap() {
const { channelState, connextState, swapPending } = this.state;
if (!connextState || hasPendingOps(channelState) || swapPending) {
!swapPending && this.logCurrentState('PC::autoswap::exception');
return;
}
const weiBalance = toBN(channelState.balanceWeiUser);
const tokenBalance = toBN(channelState.balanceTokenUser);
if (channelState && weiBalance.gt(toBN('0')) && tokenBalance.lte(HUB_EXCHANGE_CEILING)) {
this.setState({ swapPending: true });
Logger.log('PC::pollAndSwap autoSwap exchanging');
await this.state.connext.exchange(channelState.balanceWeiUser, 'wei');
Logger.log('PC::pollAndSwap autoSwap exchanging succesful');
this.setState({ swapPending: false });
} | const currentBalance =
(newState && newState.persistent.channel && newState.persistent.channel.balanceTokenUser) || '0';
if (toBN(prevBalance).lt(toBN(currentBalance))) {
this.checkPaymentHistory();
}
};
handleInternalTransactions = txHash => {
const { withdrawalPendingValue } = this.state;
const networkID = Networks[Engine.context.NetworkController.state.provider.type].networkId.toString();
const newInternalTxs = Engine.context.TransactionController.state.internalTransactions || [];
newInternalTxs.push({
time: Date.now(),
status: 'confirmed',
paymentChannelTransaction: true,
networkID,
transaction: {
from: CONTRACTS[networkID],
to: Engine.context.PreferencesController.state.selectedAddress,
value: BNToHex(withdrawalPendingValue)
},
transactionHash: txHash
});
return newInternalTxs;
};
checkStatus() {
const { runtime, status, depositPending, withdrawalPending } = this.state;
const newStatus = {
reset: status.reset
};
if (runtime) {
if (depositPending && runtime.deposit.submitted) {
if (!runtime.deposit.detected) {
newStatus.type = 'DEPOSIT_PENDING';
} else {
newStatus.type = 'DEPOSIT_SUCCESS';
newStatus.txHash = runtime.deposit.transactionHash;
this.setState({ depositPending: false });
}
}
if (withdrawalPending && runtime.withdrawal.submitted) {
if (!runtime.withdrawal.detected) {
newStatus.type = 'WITHDRAWAL_PENDING';
} else {
newStatus.type = 'WITHDRAWAL_SUCCESS';
newStatus.txHash = runtime.withdrawal.transactionHash;
const newInternalTxs = this.handleInternalTransactions(newStatus.txHash);
Engine.context.TransactionController.update({ internalTransactions: newInternalTxs });
this.setState({ withdrawalPending: false, withdrawalPendingValue: undefined });
}
}
}
if (newStatus.type !== status.type) {
newStatus.reset = true;
if (newStatus | }
checkForBalanceChange = async newState => {
// Check for balance changes
const prevBalance = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0'; | random_line_split |
PaymentChannelsClient.js | runtime: null,
exchangeRate: 0,
sendAmount: '',
sendRecipient: '',
depositAmount: '',
status: {
txHash: '',
type: '',
reset: false
},
depositPending: false,
withdrawalPending: false,
withdrawalPendingValue: undefined,
blocked: false,
transactions: [],
swapPending: false
};
}
setState = data => {
Object.keys(data).forEach(key => {
this.state[key] = data[key];
});
};
async setConnext(provider) {
const { type } = provider;
const infuraProvider = createInfuraProvider({ network: type });
let hubUrl;
const ethprovider = new EthQuery(infuraProvider);
switch (type) {
case 'rinkeby':
hubUrl = `${PUBLIC_URL}/api/rinkeby/hub`;
break;
case 'mainnet':
hubUrl = `${PUBLIC_URL}/api/mainnet/hub`;
break;
default:
throw new Error(`Unrecognized network: ${type}`);
}
const { KeyringController, TransactionController } = Engine.context;
const opts = {
hubUrl,
externalWallet: {
external: true,
address: this.selectedAddress,
getAddress: () => Promise.resolve(this.selectedAddress),
signMessage: message => {
const hexMessage = byteArrayToHex(message);
return KeyringController.signPersonalMessage({ data: hexMessage, from: this.selectedAddress });
},
sign: async txMeta => {
// We have to normalize the values
delete txMeta.gas;
delete txMeta.gasPrice;
const weiValue = txMeta.value.toString();
const bnValue = toBN(weiValue);
const normalizedTxMeta = {
...txMeta,
value: BNToHex(bnValue),
silent: true
};
try {
const signedTx = await TransactionController.addTransaction(normalizedTxMeta);
const hash = await signedTx.result;
return new Promise(resolve => {
TransactionController.hub.on(`${signedTx.transactionMeta.id}:finished`, async () => {
TransactionController.hub.removeAllListeners(`${signedTx.transactionMeta.id}:finished`);
});
TransactionController.hub.on(`${signedTx.transactionMeta.id}:confirmed`, async () => {
TransactionController.hub.removeAllListeners(
`${signedTx.transactionMeta.id}:confirmed`
);
setTimeout(() => {
TransactionsNotificationManager.showInstantPaymentNotification('pending_deposit');
}, 1000);
resolve({
hash,
wait: () => Promise.resolve(1)
});
});
});
} catch (e) {
if (!this.state.blocked) {
this.setState({ blocked: true });
setTimeout(() => {
this.setState({ blocked: false });
}, 60 * BLOCKED_DEPOSIT_DURATION_MINUTES * 1000);
}
Logger.error('ExternalWallet::sign', e);
throw e;
}
}
},
web3Provider: Engine.context.NetworkController.provider
};
// *** Instantiate the connext client ***
try {
Logger.log('PC::createClient about to call');
const connext = await Connext.createClient(opts);
Logger.log('PC::createClient success');
this.setState({
connext,
tokenAddress: connext.opts.tokenAddress,
channelManagerAddress: connext.opts.contractAddress,
hubWalletAddress: connext.opts.hubAddress,
ethChainId: connext.opts.ethChainId,
ethprovider
});
} catch (e) {
this.logCurrentState('PC::createClient');
Logger.error('PC::createClient', e);
throw e;
}
}
getBalance = () => {
const amount = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0';
const ret = parseFloat(renderFromWei(amount, 18));
if (ret === 0) {
return '0.00';
}
return ret.toFixed(2).toString();
};
async pollConnextState() {
Logger.log('PC::createClient success');
const { connext } = this.state;
// start polling
try {
Logger.log('PC::pollConnextState connext.start');
await connext.start();
Logger.log('PC::pollConnextState connext.start succesful');
} catch (e) {
this.logCurrentState('PC::start');
Logger.error('PC::start', e);
}
// register connext listeners
connext.on('onStateChange', async state => {
try {
this.checkForBalanceChange(state);
this.setState({
ready: true,
channelState: state.persistent.channel,
connextState: state,
runtime: state.runtime,
exchangeRate: state.runtime.exchangeRate ? state.runtime.exchangeRate.rates.DAI : 0
});
this.checkStatus();
const transactions = await this.state.connext.getPaymentHistory();
hub.emit('state::change', {
balance: this.getBalance(),
status: this.state.status,
transactions,
ready: true
});
if (state.runtime.channelStatus === 'CS_CHAINSAW_ERROR') {
hub.emit('state::cs_chainsaw_error', { channelState: state.persistent.channel });
}
} catch (e) {
this.logCurrentState('PC::onStateChange');
Logger.error('PC::onStateChange', e);
}
});
}
checkPaymentHistory = async () => {
const paymentHistory = await this.state.connext.getPaymentHistory();
const lastKnownPaymentIDStr = await AsyncStorage.getItem('@MetaMask:lastKnownInstantPaymentID');
let lastKnownPaymentID = 0;
const latestPayment = paymentHistory.find(
payment => payment.recipient.toLowerCase() === this.selectedAddress.toLowerCase()
);
if (latestPayment) {
const latestPaymentID = parseInt(latestPayment.id, 10);
if (lastKnownPaymentIDStr) {
lastKnownPaymentID = parseInt(lastKnownPaymentIDStr, 10);
if (lastKnownPaymentID < latestPaymentID) {
const amountToken = renderFromWei(latestPayment.amount.amountToken);
setTimeout(() => {
TransactionsNotificationManager.showIncomingPaymentNotification(amountToken);
}, 300);
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
} else {
// For first time flow
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
}
this.setState({ transactions: paymentHistory });
};
pollAndSwap = async () => {
try {
await this.autoSwap();
} catch (e) {
this.logCurrentState('PC::autoswap');
Logger.error('PC::autoswap', e);
this.setState({ swapPending: false });
}
this.autoswapHandler = setTimeout(() => {
this.pollAndSwap();
}, 1000);
};
async autoSwap() {
const { channelState, connextState, swapPending } = this.state;
if (!connextState || hasPendingOps(channelState) || swapPending) {
!swapPending && this.logCurrentState('PC::autoswap::exception');
return;
}
const weiBalance = toBN(channelState.balanceWeiUser);
const tokenBalance = toBN(channelState.balanceTokenUser);
if (channelState && weiBalance.gt(toBN('0')) && tokenBalance.lte(HUB_EXCHANGE_CEILING)) {
this.setState({ swapPending: true });
Logger.log('PC::pollAndSwap autoSwap exchanging');
await this.state.connext.exchange(channelState.balanceWeiUser, 'wei');
Logger.log('PC::pollAndSwap autoSwap exchanging succesful');
this.setState({ swapPending: false });
}
}
checkForBalanceChange = async newState => {
// Check for balance changes
const prevBalance = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0';
const currentBalance =
(newState && newState.persistent.channel && newState.persistent.channel.balanceTokenUser) || '0';
if (toBN(prevBalance).lt(toBN(currentBalance))) {
this.checkPaymentHistory();
}
};
handleInternalTransactions = txHash => {
const { withdrawalPendingValue } = this.state;
const networkID = Networks[Engine.context.NetworkController.state.provider.type].networkId.toString();
const newInternalTxs = Engine.context.TransactionController.state.internalTransactions || [];
| {
const { provider } = Engine.context.NetworkController.state;
this.selectedAddress = address;
this.state = {
ready: false,
provider,
hubUrl: null,
tokenAddress: null,
contractAddress: null,
hubWalletAddress: null,
ethprovider: null,
tokenContract: null,
connext: null,
channelManagerAddress: null,
ethChainId: null,
authorized: false,
address: null,
channelState: null,
connextState: null,
persistent: null, | identifier_body |
|
PaymentChannelsClient.js | async () => {
TransactionController.hub.removeAllListeners(
`${signedTx.transactionMeta.id}:confirmed`
);
setTimeout(() => {
TransactionsNotificationManager.showInstantPaymentNotification('pending_deposit');
}, 1000);
resolve({
hash,
wait: () => Promise.resolve(1)
});
});
});
} catch (e) {
if (!this.state.blocked) {
this.setState({ blocked: true });
setTimeout(() => {
this.setState({ blocked: false });
}, 60 * BLOCKED_DEPOSIT_DURATION_MINUTES * 1000);
}
Logger.error('ExternalWallet::sign', e);
throw e;
}
}
},
web3Provider: Engine.context.NetworkController.provider
};
// *** Instantiate the connext client ***
try {
Logger.log('PC::createClient about to call');
const connext = await Connext.createClient(opts);
Logger.log('PC::createClient success');
this.setState({
connext,
tokenAddress: connext.opts.tokenAddress,
channelManagerAddress: connext.opts.contractAddress,
hubWalletAddress: connext.opts.hubAddress,
ethChainId: connext.opts.ethChainId,
ethprovider
});
} catch (e) {
this.logCurrentState('PC::createClient');
Logger.error('PC::createClient', e);
throw e;
}
}
getBalance = () => {
const amount = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0';
const ret = parseFloat(renderFromWei(amount, 18));
if (ret === 0) {
return '0.00';
}
return ret.toFixed(2).toString();
};
async pollConnextState() {
Logger.log('PC::createClient success');
const { connext } = this.state;
// start polling
try {
Logger.log('PC::pollConnextState connext.start');
await connext.start();
Logger.log('PC::pollConnextState connext.start succesful');
} catch (e) {
this.logCurrentState('PC::start');
Logger.error('PC::start', e);
}
// register connext listeners
connext.on('onStateChange', async state => {
try {
this.checkForBalanceChange(state);
this.setState({
ready: true,
channelState: state.persistent.channel,
connextState: state,
runtime: state.runtime,
exchangeRate: state.runtime.exchangeRate ? state.runtime.exchangeRate.rates.DAI : 0
});
this.checkStatus();
const transactions = await this.state.connext.getPaymentHistory();
hub.emit('state::change', {
balance: this.getBalance(),
status: this.state.status,
transactions,
ready: true
});
if (state.runtime.channelStatus === 'CS_CHAINSAW_ERROR') {
hub.emit('state::cs_chainsaw_error', { channelState: state.persistent.channel });
}
} catch (e) {
this.logCurrentState('PC::onStateChange');
Logger.error('PC::onStateChange', e);
}
});
}
checkPaymentHistory = async () => {
const paymentHistory = await this.state.connext.getPaymentHistory();
const lastKnownPaymentIDStr = await AsyncStorage.getItem('@MetaMask:lastKnownInstantPaymentID');
let lastKnownPaymentID = 0;
const latestPayment = paymentHistory.find(
payment => payment.recipient.toLowerCase() === this.selectedAddress.toLowerCase()
);
if (latestPayment) {
const latestPaymentID = parseInt(latestPayment.id, 10);
if (lastKnownPaymentIDStr) {
lastKnownPaymentID = parseInt(lastKnownPaymentIDStr, 10);
if (lastKnownPaymentID < latestPaymentID) {
const amountToken = renderFromWei(latestPayment.amount.amountToken);
setTimeout(() => {
TransactionsNotificationManager.showIncomingPaymentNotification(amountToken);
}, 300);
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
} else {
// For first time flow
await AsyncStorage.setItem('@MetaMask:lastKnownInstantPaymentID', latestPaymentID.toString());
}
}
this.setState({ transactions: paymentHistory });
};
pollAndSwap = async () => {
try {
await this.autoSwap();
} catch (e) {
this.logCurrentState('PC::autoswap');
Logger.error('PC::autoswap', e);
this.setState({ swapPending: false });
}
this.autoswapHandler = setTimeout(() => {
this.pollAndSwap();
}, 1000);
};
async autoSwap() {
const { channelState, connextState, swapPending } = this.state;
if (!connextState || hasPendingOps(channelState) || swapPending) {
!swapPending && this.logCurrentState('PC::autoswap::exception');
return;
}
const weiBalance = toBN(channelState.balanceWeiUser);
const tokenBalance = toBN(channelState.balanceTokenUser);
if (channelState && weiBalance.gt(toBN('0')) && tokenBalance.lte(HUB_EXCHANGE_CEILING)) {
this.setState({ swapPending: true });
Logger.log('PC::pollAndSwap autoSwap exchanging');
await this.state.connext.exchange(channelState.balanceWeiUser, 'wei');
Logger.log('PC::pollAndSwap autoSwap exchanging succesful');
this.setState({ swapPending: false });
}
}
checkForBalanceChange = async newState => {
// Check for balance changes
const prevBalance = (this.state && this.state.channelState && this.state.channelState.balanceTokenUser) || '0';
const currentBalance =
(newState && newState.persistent.channel && newState.persistent.channel.balanceTokenUser) || '0';
if (toBN(prevBalance).lt(toBN(currentBalance))) {
this.checkPaymentHistory();
}
};
handleInternalTransactions = txHash => {
const { withdrawalPendingValue } = this.state;
const networkID = Networks[Engine.context.NetworkController.state.provider.type].networkId.toString();
const newInternalTxs = Engine.context.TransactionController.state.internalTransactions || [];
newInternalTxs.push({
time: Date.now(),
status: 'confirmed',
paymentChannelTransaction: true,
networkID,
transaction: {
from: CONTRACTS[networkID],
to: Engine.context.PreferencesController.state.selectedAddress,
value: BNToHex(withdrawalPendingValue)
},
transactionHash: txHash
});
return newInternalTxs;
};
checkStatus() {
const { runtime, status, depositPending, withdrawalPending } = this.state;
const newStatus = {
reset: status.reset
};
if (runtime) {
if (depositPending && runtime.deposit.submitted) {
if (!runtime.deposit.detected) {
newStatus.type = 'DEPOSIT_PENDING';
} else {
newStatus.type = 'DEPOSIT_SUCCESS';
newStatus.txHash = runtime.deposit.transactionHash;
this.setState({ depositPending: false });
}
}
if (withdrawalPending && runtime.withdrawal.submitted) {
if (!runtime.withdrawal.detected) {
newStatus.type = 'WITHDRAWAL_PENDING';
} else {
newStatus.type = 'WITHDRAWAL_SUCCESS';
newStatus.txHash = runtime.withdrawal.transactionHash;
const newInternalTxs = this.handleInternalTransactions(newStatus.txHash);
Engine.context.TransactionController.update({ internalTransactions: newInternalTxs });
this.setState({ withdrawalPending: false, withdrawalPendingValue: undefined });
}
}
}
if (newStatus.type !== status.type) {
newStatus.reset = true;
if (newStatus.type && newStatus.type !== 'DEPOSIT_PENDING') {
const notification_type = newStatus.type
.toLowerCase()
.split('_')
.reverse()
.join('_');
hideMessage();
setTimeout(() => {
TransactionsNotificationManager.showInstantPaymentNotification(notification_type);
}, 300);
}
}
this.setState({ status: newStatus });
}
deposit = async ({ depositAmount }) => {
if (this.state.blocked) {
throw new Error('still_blocked');
}
try {
const { connext } = this.state;
const data = {
amountWei: toWei(depositAmount).toString(),
amountToken: '0'
};
await connext.deposit(data);
this.setState({ depositPending: true });
} catch (e) {
this.logCurrentState('PC::deposit');
Logger.error('PC::deposit', e);
throw e;
}
};
send = async ({ sendAmount, sendRecipient }) => {
let amount = toWei(sendAmount).toString();
const {
connext,
channelState: { balanceTokenUser }
} = this.state;
const maxAmount = balanceTokenUser;
if (sendAmount.toString() === this.getBalance()) | {
amount = maxAmount;
} | conditional_block |
|
test_utils.py | = 'localhost'
os.environ['HTTP_HOST'] = 'localhost'
os.environ['SERVER_PORT'] = '8080'
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
DEFAULT_USERNAME = 'defaultusername'
def setUp(self):
raise NotImplementedError
def tearDown(self):
raise NotImplementedError
def log_line(self, line):
"""Print the line with a prefix that can be identified by the
script that calls the test.
"""
print '%s%s' % (LOG_LINE_PREFIX, line)
def _delete_all_models(self):
raise NotImplementedError
def login(self, email, is_super_admin=False):
os.environ['USER_EMAIL'] = email
os.environ['USER_ID'] = self.get_user_id_from_email(email)
os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'
def logout(self):
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_expected_login_url(self, slug):
"""Returns the expected login URL."""
return current_user_services.create_login_url(slug)
def get_expected_logout_url(self, slug):
"""Returns the expected logout URL."""
return current_user_services.create_logout_url(slug)
def _parse_json_response(self, json_response, expect_errors=False):
"""Convert a JSON server response to an object (such as a dict)."""
if not expect_errors:
self.assertEqual(json_response.status_int, 200)
self.assertEqual(
json_response.content_type, 'application/javascript')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url):
"""Get a JSON response, transformed to a Python object."""
json_response = self.testapp.get(url)
self.assertEqual(json_response.status_int, 200)
return self._parse_json_response(json_response, expect_errors=False)
def post_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200, upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.post(
str(url), data, expect_errors=expect_errors,
upload_files=upload_files)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def put_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Put an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.put(
str(url), data, expect_errors=expect_errors)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def get_csrf_token_from_response(self, response):
"""Retrieve the CSRF token from a GET response."""
return re.search(CSRF_REGEX, response.body).group(1)
def register_editor(self, email, username=None):
"""Register a user with the given username as an editor."""
if username is None:
username = self.DEFAULT_USERNAME
self.login(email)
response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)
csrf_token = self.get_csrf_token_from_response(response)
response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {
'csrf_token': csrf_token,
'payload': json.dumps({
'username': username,
'agreed_to_terms': True
})
})
self.assertEqual(response.status_int, 200)
self.logout()
def set_admins(self, admin_emails):
"""Set the ADMIN_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.ADMIN_EMAILS.name: admin_emails,
}
}, csrf_token)
self.logout()
def set_moderators(self, moderator_emails):
"""Set the MODERATOR_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MODERATOR_EMAILS.name: moderator_emails,
}
}, csrf_token)
self.logout()
def get_current_logged_in_user_id(self):
return os.environ['USER_ID']
def get_user_id_from_email(self, email):
return current_user_services.get_user_id_from_email(email)
def save_new_default_exploration(self,
exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new strictly-validated exploration.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exploration.states[exploration.init_state_name].widget.handlers[
0].rule_specs[0].dest = feconf.END_DEST
exploration.objective = 'An objective'
exp_services.save_new_exploration(owner_id, exploration)
return exploration
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a
'with' statement. The object can be anything that supports
getattr and setattr, such as class instances, modules, ...
Example usage:
import math
with self.swap(math, "sqrt", lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
class AppEngineTestBase(TestBase):
"""Base class for tests requiring App Engine services."""
def _delete_all_models(self):
from google.appengine.ext import ndb
ndb.delete_multi(ndb.Query().iter(keys_only=True))
def setUp(self):
empty_environ()
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
self.testbed = testbed.Testbed()
self.testbed.activate()
# Configure datastore policy to emulate instantaneously and globally
# consistent HRD.
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
probability=1)
# Declare any relevant App Engine service stubs here.
self.testbed.init_user_stub()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
self.testbed.init_taskqueue_stub()
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
self.testbed.init_urlfetch_stub()
self.testbed.init_files_stub()
self.testbed.init_blobstore_stub()
# Set up the app to be tested.
self.testapp = webtest.TestApp(main.app)
def tearDown(self):
self.logout()
self._delete_all_models()
self.testbed.deactivate()
def count_jobs_in_taskqueue(self):
return len(self.taskqueue_stub.get_filtered_tasks())
def process_and_flush_pending_tasks(self):
from google.appengine.ext import deferred
tasks = self.taskqueue_stub.get_filtered_tasks()
self.taskqueue_stub.FlushQueue('default')
while tasks:
for task in tasks:
| if task.url == '/_ah/queue/deferred':
deferred.run(task.payload)
else:
# All other tasks are expected to be mapreduce ones.
headers = {
key: str(val) for key, val in task.headers.iteritems()
}
headers['Content-Length'] = str(len(task.payload or ''))
response = self.testapp.post(
url=str(task.url), params=(task.payload or ''),
headers=headers)
if response.status_code != 200:
raise RuntimeError(
'MapReduce task to URL %s failed' % task.url) | conditional_block |
|
test_utils.py | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
import contextlib
import os
import re
import unittest
import webtest
from core.domain import config_domain
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import main
import json
CSRF_REGEX = (
r'csrf_token: JSON\.parse\(\'\\\"([A-Za-z0-9/=_-]+)\\\"\'\)')
# Prefix to append to all lines printed by tests to the console.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
def empty_environ():
os.environ['AUTH_DOMAIN'] = 'example.com'
os.environ['SERVER_NAME'] = 'localhost'
os.environ['HTTP_HOST'] = 'localhost'
os.environ['SERVER_PORT'] = '8080'
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
DEFAULT_USERNAME = 'defaultusername'
def setUp(self):
raise NotImplementedError
def tearDown(self):
raise NotImplementedError
def log_line(self, line):
"""Print the line with a prefix that can be identified by the
script that calls the test.
"""
print '%s%s' % (LOG_LINE_PREFIX, line)
def _delete_all_models(self):
raise NotImplementedError
def login(self, email, is_super_admin=False):
os.environ['USER_EMAIL'] = email
os.environ['USER_ID'] = self.get_user_id_from_email(email)
os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'
def logout(self):
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_expected_login_url(self, slug):
"""Returns the expected login URL."""
return current_user_services.create_login_url(slug)
def get_expected_logout_url(self, slug):
"""Returns the expected logout URL."""
return current_user_services.create_logout_url(slug)
def _parse_json_response(self, json_response, expect_errors=False):
"""Convert a JSON server response to an object (such as a dict)."""
if not expect_errors:
self.assertEqual(json_response.status_int, 200)
self.assertEqual(
json_response.content_type, 'application/javascript')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url):
"""Get a JSON response, transformed to a Python object."""
json_response = self.testapp.get(url)
self.assertEqual(json_response.status_int, 200)
return self._parse_json_response(json_response, expect_errors=False)
def post_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200, upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.post(
str(url), data, expect_errors=expect_errors,
upload_files=upload_files)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def put_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Put an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.put(
str(url), data, expect_errors=expect_errors)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def get_csrf_token_from_response(self, response):
"""Retrieve the CSRF token from a GET response."""
return re.search(CSRF_REGEX, response.body).group(1)
def register_editor(self, email, username=None):
"""Register a user with the given username as an editor."""
if username is None:
username = self.DEFAULT_USERNAME
self.login(email)
response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)
csrf_token = self.get_csrf_token_from_response(response)
response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {
'csrf_token': csrf_token,
'payload': json.dumps({
'username': username,
'agreed_to_terms': True
})
})
self.assertEqual(response.status_int, 200)
self.logout()
def set_admins(self, admin_emails):
"""Set the ADMIN_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.ADMIN_EMAILS.name: admin_emails,
}
}, csrf_token)
self.logout()
def set_moderators(self, moderator_emails):
"""Set the MODERATOR_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MODERATOR_EMAILS.name: moderator_emails,
}
}, csrf_token)
self.logout()
def | (self):
return os.environ['USER_ID']
def get_user_id_from_email(self, email):
return current_user_services.get_user_id_from_email(email)
def save_new_default_exploration(self,
exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new strictly-validated exploration.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exploration.states[exploration.init_state_name].widget.handlers[
0].rule_specs[0].dest = feconf.END_DEST
exploration.objective = 'An objective'
exp_services.save_new_exploration(owner_id, exploration)
return exploration
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a
'with' statement. The object can be anything that supports
getattr and setattr, such as class instances, modules, ...
Example usage:
import math
with self.swap(math, "sqrt", lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
class AppEngineTestBase(TestBase):
"""Base class for tests requiring App Engine services."""
def _delete_all_models(self):
from google.appengine.ext import ndb
ndb.delete_multi(ndb.Query().iter(keys_only=True))
def setUp(self):
empty_environ()
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
self.testbed = testbed.Testbed()
self.testbed.activate()
# Configure datastore policy to emulate instantaneously and globally
# consistent HRD.
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
probability=1)
# Declare any relevant App Engine service stubs here.
self.testbed.init_user_stub()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
self.testbed.init_taskqueue_stub()
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
self.testbed.init_urlfetch_stub()
self.testbed.init_files_stub()
self.testbed.init_blobstore_stub()
# Set up the app to be tested.
self.testapp = webtest.TestApp(main.app)
def tearDown(self):
self.logout()
self._delete_all_models()
self.testbed.deactivate()
def count_jobs_in_task | get_current_logged_in_user_id | identifier_name |
test_utils.py | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
import contextlib
import os
import re
import unittest
import webtest
from core.domain import config_domain
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import main
import json
CSRF_REGEX = (
r'csrf_token: JSON\.parse\(\'\\\"([A-Za-z0-9/=_-]+)\\\"\'\)')
# Prefix to append to all lines printed by tests to the console.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
def empty_environ():
os.environ['AUTH_DOMAIN'] = 'example.com'
os.environ['SERVER_NAME'] = 'localhost'
os.environ['HTTP_HOST'] = 'localhost'
os.environ['SERVER_PORT'] = '8080'
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
DEFAULT_USERNAME = 'defaultusername'
def setUp(self):
raise NotImplementedError
def tearDown(self):
raise NotImplementedError
def log_line(self, line):
"""Print the line with a prefix that can be identified by the
script that calls the test.
"""
print '%s%s' % (LOG_LINE_PREFIX, line)
def _delete_all_models(self):
raise NotImplementedError
def login(self, email, is_super_admin=False):
os.environ['USER_EMAIL'] = email
os.environ['USER_ID'] = self.get_user_id_from_email(email)
os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'
def logout(self):
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_expected_login_url(self, slug):
"""Returns the expected login URL."""
return current_user_services.create_login_url(slug)
def get_expected_logout_url(self, slug):
"""Returns the expected logout URL."""
return current_user_services.create_logout_url(slug)
def _parse_json_response(self, json_response, expect_errors=False):
"""Convert a JSON server response to an object (such as a dict)."""
if not expect_errors: | json_response.content_type, 'application/javascript')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url):
"""Get a JSON response, transformed to a Python object."""
json_response = self.testapp.get(url)
self.assertEqual(json_response.status_int, 200)
return self._parse_json_response(json_response, expect_errors=False)
def post_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200, upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.post(
str(url), data, expect_errors=expect_errors,
upload_files=upload_files)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def put_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Put an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.put(
str(url), data, expect_errors=expect_errors)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def get_csrf_token_from_response(self, response):
"""Retrieve the CSRF token from a GET response."""
return re.search(CSRF_REGEX, response.body).group(1)
def register_editor(self, email, username=None):
"""Register a user with the given username as an editor."""
if username is None:
username = self.DEFAULT_USERNAME
self.login(email)
response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)
csrf_token = self.get_csrf_token_from_response(response)
response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {
'csrf_token': csrf_token,
'payload': json.dumps({
'username': username,
'agreed_to_terms': True
})
})
self.assertEqual(response.status_int, 200)
self.logout()
def set_admins(self, admin_emails):
"""Set the ADMIN_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.ADMIN_EMAILS.name: admin_emails,
}
}, csrf_token)
self.logout()
def set_moderators(self, moderator_emails):
"""Set the MODERATOR_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MODERATOR_EMAILS.name: moderator_emails,
}
}, csrf_token)
self.logout()
def get_current_logged_in_user_id(self):
return os.environ['USER_ID']
def get_user_id_from_email(self, email):
return current_user_services.get_user_id_from_email(email)
def save_new_default_exploration(self,
exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new strictly-validated exploration.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exploration.states[exploration.init_state_name].widget.handlers[
0].rule_specs[0].dest = feconf.END_DEST
exploration.objective = 'An objective'
exp_services.save_new_exploration(owner_id, exploration)
return exploration
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a
'with' statement. The object can be anything that supports
getattr and setattr, such as class instances, modules, ...
Example usage:
import math
with self.swap(math, "sqrt", lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
class AppEngineTestBase(TestBase):
"""Base class for tests requiring App Engine services."""
def _delete_all_models(self):
from google.appengine.ext import ndb
ndb.delete_multi(ndb.Query().iter(keys_only=True))
def setUp(self):
empty_environ()
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
self.testbed = testbed.Testbed()
self.testbed.activate()
# Configure datastore policy to emulate instantaneously and globally
# consistent HRD.
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
probability=1)
# Declare any relevant App Engine service stubs here.
self.testbed.init_user_stub()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
self.testbed.init_taskqueue_stub()
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
self.testbed.init_urlfetch_stub()
self.testbed.init_files_stub()
self.testbed.init_blobstore_stub()
# Set up the app to be tested.
self.testapp = webtest.TestApp(main.app)
def tearDown(self):
self.logout()
self._delete_all_models()
self.testbed.deactivate()
def count_jobs_in_taskqueue | self.assertEqual(json_response.status_int, 200)
self.assertEqual( | random_line_split |
test_utils.py | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
import contextlib
import os
import re
import unittest
import webtest
from core.domain import config_domain
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import main
import json
CSRF_REGEX = (
r'csrf_token: JSON\.parse\(\'\\\"([A-Za-z0-9/=_-]+)\\\"\'\)')
# Prefix to append to all lines printed by tests to the console.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
def empty_environ():
os.environ['AUTH_DOMAIN'] = 'example.com'
os.environ['SERVER_NAME'] = 'localhost'
os.environ['HTTP_HOST'] = 'localhost'
os.environ['SERVER_PORT'] = '8080'
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
DEFAULT_USERNAME = 'defaultusername'
def setUp(self):
raise NotImplementedError
def tearDown(self):
raise NotImplementedError
def log_line(self, line):
"""Print the line with a prefix that can be identified by the
script that calls the test.
"""
print '%s%s' % (LOG_LINE_PREFIX, line)
def _delete_all_models(self):
raise NotImplementedError
def login(self, email, is_super_admin=False):
os.environ['USER_EMAIL'] = email
os.environ['USER_ID'] = self.get_user_id_from_email(email)
os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'
def logout(self):
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_expected_login_url(self, slug):
"""Returns the expected login URL."""
return current_user_services.create_login_url(slug)
def get_expected_logout_url(self, slug):
"""Returns the expected logout URL."""
return current_user_services.create_logout_url(slug)
def _parse_json_response(self, json_response, expect_errors=False):
"""Convert a JSON server response to an object (such as a dict)."""
if not expect_errors:
self.assertEqual(json_response.status_int, 200)
self.assertEqual(
json_response.content_type, 'application/javascript')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url):
"""Get a JSON response, transformed to a Python object."""
json_response = self.testapp.get(url)
self.assertEqual(json_response.status_int, 200)
return self._parse_json_response(json_response, expect_errors=False)
def post_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200, upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.post(
str(url), data, expect_errors=expect_errors,
upload_files=upload_files)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def put_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Put an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.put(
str(url), data, expect_errors=expect_errors)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def get_csrf_token_from_response(self, response):
"""Retrieve the CSRF token from a GET response."""
return re.search(CSRF_REGEX, response.body).group(1)
def register_editor(self, email, username=None):
"""Register a user with the given username as an editor."""
if username is None:
username = self.DEFAULT_USERNAME
self.login(email)
response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)
csrf_token = self.get_csrf_token_from_response(response)
response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {
'csrf_token': csrf_token,
'payload': json.dumps({
'username': username,
'agreed_to_terms': True
})
})
self.assertEqual(response.status_int, 200)
self.logout()
def set_admins(self, admin_emails):
"""Set the ADMIN_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.ADMIN_EMAILS.name: admin_emails,
}
}, csrf_token)
self.logout()
def set_moderators(self, moderator_emails):
"""Set the MODERATOR_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MODERATOR_EMAILS.name: moderator_emails,
}
}, csrf_token)
self.logout()
def get_current_logged_in_user_id(self):
return os.environ['USER_ID']
def get_user_id_from_email(self, email):
return current_user_services.get_user_id_from_email(email)
def save_new_default_exploration(self,
exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new strictly-validated exploration.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exploration.states[exploration.init_state_name].widget.handlers[
0].rule_specs[0].dest = feconf.END_DEST
exploration.objective = 'An objective'
exp_services.save_new_exploration(owner_id, exploration)
return exploration
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a
'with' statement. The object can be anything that supports
getattr and setattr, such as class instances, modules, ...
Example usage:
import math
with self.swap(math, "sqrt", lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
class AppEngineTestBase(TestBase):
"""Base class for tests requiring App Engine services."""
def _delete_all_models(self):
|
def setUp(self):
empty_environ()
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
self.testbed = testbed.Testbed()
self.testbed.activate()
# Configure datastore policy to emulate instantaneously and globally
# consistent HRD.
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
probability=1)
# Declare any relevant App Engine service stubs here.
self.testbed.init_user_stub()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
self.testbed.init_taskqueue_stub()
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
self.testbed.init_urlfetch_stub()
self.testbed.init_files_stub()
self.testbed.init_blobstore_stub()
# Set up the app to be tested.
self.testapp = webtest.TestApp(main.app)
def tearDown(self):
self.logout()
self._delete_all_models()
self.testbed.deactivate()
def count_jobs_in_task | from google.appengine.ext import ndb
ndb.delete_multi(ndb.Query().iter(keys_only=True)) | identifier_body |
visualizations.go | izationWithDashboards {
// This function is used when rendering grouped visualizations with their
// dashboards into the response returned to the user
log.Logger.Debug("rendering data to user")
response := []common.VisualizationWithDashboards{}
for visualizationPtr, dashboards := range *data {
renderedVisualization := VisualizationDashboardToResponse(
&visualizationPtr, dashboards)
response = append(response, *renderedVisualization)
}
return &response
}
// VisualizationsGet handler queries visualizations
func (h *V1Visualizations) VisualizationsGet(clients *common.ClientContainer,
organizationID, name string, tags map[string]interface{}) (
*[]common.VisualizationWithDashboards, error) {
log.Logger.Debug("Querying data to user according to name and tags")
data, err := clients.DatabaseManager.QueryVisualizationsDashboards(
"", name, organizationID, tags)
if err != nil {
log.Logger.Errorf("Error getting data from db: '%s'", err)
return nil, err
}
return GroupedVisualizationDashboardToResponse(data), nil
}
func renderTemplates(templates []string, templateParamaters []interface{}) (
[]string, error) {
// this function takes Visualization data and returns rendered templates
log.Logger.Debug("Rendering golang templates")
renderedTemplates := []string{}
for index := range templates {
// validate that golang template is valid
// "missingkey=error" would return error, if user did not provide
// all parameters for his own template
tmpl, err := template.New("").Option(
"missingkey=error").Parse(templates[index])
if err != nil {
// something is wrong with structure of user provided template
return nil, common.NewUserDataError(
fmt.Sprintf("ErrorMsg: '%s', TemplateIndex: '%d'",
err.Error(), index))
}
// render golang template with user provided arguments to buffer
templateBuffer := new(bytes.Buffer)
err = tmpl.Execute(templateBuffer, templateParamaters[index])
if err != nil {
// something is wrong with rendering of user provided template
return nil, common.NewUserDataError(err.Error())
}
renderedTemplates = append(renderedTemplates, templateBuffer.String())
}
return renderedTemplates, nil
}
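// A minimal usage sketch for renderTemplates (editorial example, not part
// of the original source; the template text and parameter map are
// hypothetical):
//
//	rendered, err := renderTemplates(
//		[]string{`{"title": "{{.Title}}"}`},
//		[]interface{}{map[string]interface{}{"Title": "cpu-usage"}},
//	)
//	// err == nil, rendered[0] == `{"title": "cpu-usage"}`
//	// Omitting the "Title" key instead yields a common.NewUserDataError,
//	// because the template was parsed with Option("missingkey=error").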
// VisualizationsPost handler creates new visualizations
func (h *V1Visualizations) VisualizationsPost(clients *common.ClientContainer,
data common.VisualizationPOSTData, organizationID string) (
*common.VisualizationWithDashboards, error) {
/*
1 - validate and render all golang templates provided by the user;
if there are any errors, immediately return an error to the user
2 - validate that the rendered templates match the grafana json structure;
if there is any mismatch, return an error to the user
3 - create db entries for the visualization and every dashboard
4 - for each validated template, upload it to grafana and store the
received slug for a future update of the dashboard db entry
5 - return data to the user
*/
log.Logger.Debug("Extracting names, templates, data from provided user data")
templates := []string{}
templateParamaters := []interface{}{}
dashboardNames := []string{}
for _, dashboardData := range data.Dashboards {
templates = append(templates, dashboardData.TemplateBody)
templateParamaters = append(templateParamaters, dashboardData.TemplateParameters)
dashboardNames = append(dashboardNames, dashboardData.Name)
}
log.Logger.Debug("Extracted names, templates, data from provided user data")
renderedTemplates, err := renderTemplates(templates, templateParamaters)
if err != nil {
return nil, err
}
// create db entries for visualizations and dashboards
log.Logger.Debug("Creating database entries for visualizations and dashboards")
visualizationDB, dashboardsDB, err := clients.DatabaseManager.CreateVisualizationsWithDashboards(
data.Name, organizationID, data.Tags, dashboardNames, renderedTemplates)
log.Logger.Debug("Created database entries for visualizations and dashboards")
if err != nil {
return nil, err
}
/*
Here we face a consistency problem. We cannot guarantee that data
stored in the database will be successfully mirrored in grafana, due to
possible errors on the grafana side (service down, etc.). At the same
time we cannot guarantee that data created in grafana will be
successfully stored in the db.
To resolve this kind of issue, the following approach is taken: the
highest priority is given to the database data.
That means that creation of a visualization happens in 3 steps
1 - create database entries for the visualization and all dashboards.
The grafana slug field is left empty
2 - create grafana entries via the grafana api, getting slugs as the result
3 - update the database entries with the grafana slugs
*/
uploadedGrafanaSlugs := []string{}
log.Logger.Debug("Uploading dashboard data to grafana")
for _, renderedTemplate := range renderedTemplates {
slug, grafanaUploadErr := clients.Grafana.UploadDashboard(
[]byte(renderedTemplate), organizationID, false)
if grafanaUploadErr != nil {
// We cannot create a grafana dashboard using the user-provided template
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard upload %s", grafanaUploadErr)
log.Logger.Debugf("Due to error '%s' - already created grafana "+
" dashboards, matching the same visualization, would be deleted",
grafanaUploadErr)
updateDashboardsDB := []*models.Dashboard{}
deleteDashboardsDB := []*models.Dashboard{}
for index, slugToDelete := range uploadedGrafanaSlugs |
// Delete dashboards, that were not uploaded to grafana
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[len(uploadedGrafanaSlugs):]...)
if len(updateDashboardsDB) > 0 {
dashboardsToReturn := []*models.Dashboard{}
dashboardsToReturn = append(dashboardsToReturn, updateDashboardsDB...)
log.Logger.Debug("Updating db dashboards with grafana slugs")
updateErrorDB := clients.DatabaseManager.BulkUpdateDashboard(
updateDashboardsDB)
if updateErrorDB != nil {
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to update db entities of dashboards"+
" with slugs of corresponding grafana dashboards for"+
"dashboards not deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
log.Logger.Debug("Deleting db dashboards that are not uploaded" +
" to grafana")
deletionErrorDB := clients.DatabaseManager.BulkDeleteDashboard(
deleteDashboardsDB)
if deletionErrorDB != nil {
log.Logger.Debug("due to failed deletion operation - extend" +
" the slice of returned dashboards to user")
dashboardsToReturn = append(dashboardsToReturn, deleteDashboardsDB...)
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to delete entities of grafana "+
"dashboards deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
result := VisualizationDashboardToResponse(
visualizationDB, dashboardsToReturn)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("trying to delete visualization with " +
"corresponding dashboards from database. dashboards have no " +
"matching grafana uploads")
visualizationDeletionErr := clients.DatabaseManager.DeleteVisualization(
visualizationDB)
if visualizationDeletionErr != nil {
log.Logger.Error("Unable to delete visualization entry " +
"from db with corresponding dashboards entries. " +
"all entries are returned to user")
result := VisualizationDashboardToResponse(
visualizationDB, updateDashboardsDB)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("All created data was deleted both from grafana " +
"and from database without errors. original grafana error is returned")
return nil, grafanaUploadErr
}
log.Logger.Infof("Created dashboard named '%s'", slug)
uploadedGrafanaSlugs = append(uploadedGrafanaSlugs, slug)
}
log.Logger.Debug("Uploaded dashboard data to grafana")
// Positive outcome. All dashboards were created both in db | {
grafanaDeletionErr := clients.Grafana.DeleteDashboard(slugToDelete, organizationID)
// if an already-created dashboard failed to delete, the
// corresponding db entry has to be updated with the grafana slug
// to guarantee consistency
if grafanaDeletionErr != nil {
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard deletion %s", grafanaDeletionErr)
dashboard := dashboardsDB[index]
dashboard.Slug = uploadedGrafanaSlugs[index]
updateDashboardsDB = append(
updateDashboardsDB, dashboard)
} else {
log.Logger.Debug("deleted dashboard from grafana")
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[index])
}
} | conditional_block |
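// Editorial sketch (not from the original source) of the db-first
// consistency pattern used above: create db rows with empty slugs,
// create the external resources, then backfill the slugs, rolling back
// on failure. Store and Remote are hypothetical stand-ins for the
// DatabaseManager and grafana clients.
package sketch

type Row struct{ ID, Slug string }

type Store interface {
	CreateAll(bodies []string) ([]*Row, error)
	UpdateAll(rows []*Row) error
	DeleteAll(rows []*Row) error
}

type Remote interface {
	Upload(body string) (slug string, err error)
	Delete(slug string) error
}

func createWithBackfill(store Store, remote Remote, bodies []string) ([]*Row, error) {
	// step 1: db rows first, slugs left empty (db state has priority)
	rows, err := store.CreateAll(bodies)
	if err != nil {
		return nil, err
	}
	for i, body := range bodies {
		// step 2: create the external resource
		slug, err := remote.Upload(body)
		if err != nil {
			// roll back the remote resources created so far, then the
			// db rows, mirroring the cleanup branch in VisualizationsPost
			for j := 0; j < i; j++ {
				_ = remote.Delete(rows[j].Slug)
			}
			_ = store.DeleteAll(rows)
			return nil, err
		}
		rows[i].Slug = slug
	}
	// step 3: backfill the slugs into the db rows
	if err := store.UpdateAll(rows); err != nil {
		return nil, err
	}
	return rows, nil
}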
visualizations.go | {
log.Logger.Debug("Querying data to user according to name and tags")
data, err := clients.DatabaseManager.QueryVisualizationsDashboards(
"", name, organizationID, tags)
if err != nil {
log.Logger.Errorf("Error getting data from db: '%s'", err)
return nil, err
}
return GroupedVisualizationDashboardToResponse(data), nil
}
func renderTemplates(templates []string, templateParamaters []interface{}) (
[]string, error) {
// this function takes Visualization data and returns rendered templates
log.Logger.Debug("Rendering golang templates")
renderedTemplates := []string{}
for index := range templates {
// validate that golang template is valid
// "missingkey=error" would return error, if user did not provide
// all parameters for his own template
tmpl, err := template.New("").Option(
"missingkey=error").Parse(templates[index])
if err != nil {
// something is wrong with structure of user provided template
return nil, common.NewUserDataError(
fmt.Sprintf("ErrorMsg: '%s', TemplateIndex: '%d'",
err.Error(), index))
}
// render golang template with user provided arguments to buffer
templateBuffer := new(bytes.Buffer)
err = tmpl.Execute(templateBuffer, templateParamaters[index])
if err != nil {
// something is wrong with rendering of user provided template
return nil, common.NewUserDataError(err.Error())
}
renderedTemplates = append(renderedTemplates, templateBuffer.String())
}
return renderedTemplates, nil
}
// VisualizationsPost handler creates new visualizations
func (h *V1Visualizations) VisualizationsPost(clients *common.ClientContainer,
data common.VisualizationPOSTData, organizationID string) (
*common.VisualizationWithDashboards, error) {
/*
1 - validate and render all golang templates provided by the user;
if there are any errors, immediately return an error to the user
2 - validate that the rendered templates match the grafana json structure;
if there is any mismatch, return an error to the user
3 - create db entries for the visualization and every dashboard
4 - for each validated template, upload it to grafana and store the
received slug for a future update of the dashboard db entry
5 - return data to the user
*/
log.Logger.Debug("Extracting names, templates, data from provided user data")
templates := []string{}
templateParamaters := []interface{}{}
dashboardNames := []string{}
for _, dashboardData := range data.Dashboards {
templates = append(templates, dashboardData.TemplateBody)
templateParamaters = append(templateParamaters, dashboardData.TemplateParameters)
dashboardNames = append(dashboardNames, dashboardData.Name)
}
log.Logger.Debug("Extracted names, templates, data from provided user data")
renderedTemplates, err := renderTemplates(templates, templateParamaters)
if err != nil {
return nil, err
}
// create db entries for visualizations and dashboards
log.Logger.Debug("Creating database entries for visualizations and dashboards")
visualizationDB, dashboardsDB, err := clients.DatabaseManager.CreateVisualizationsWithDashboards(
data.Name, organizationID, data.Tags, dashboardNames, renderedTemplates)
log.Logger.Debug("Created database entries for visualizations and dashboards")
if err != nil {
return nil, err
}
/*
Here we face a consistency problem. We cannot guarantee that data
stored in the database will be successfully mirrored in grafana, due to
possible errors on the grafana side (service down, etc.). At the same
time we cannot guarantee that data created in grafana will be
successfully stored in the db.
To resolve this kind of issue, the following approach is taken: the
highest priority is given to the database data.
That means that creation of a visualization happens in 3 steps
1 - create database entries for the visualization and all dashboards.
The grafana slug field is left empty
2 - create grafana entries via the grafana api, getting slugs as the result
3 - update the database entries with the grafana slugs
*/
uploadedGrafanaSlugs := []string{}
log.Logger.Debug("Uploading dashboard data to grafana")
for _, renderedTemplate := range renderedTemplates {
slug, grafanaUploadErr := clients.Grafana.UploadDashboard(
[]byte(renderedTemplate), organizationID, false)
if grafanaUploadErr != nil {
// We cannot create a grafana dashboard using the user-provided template
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard upload %s", grafanaUploadErr)
log.Logger.Debugf("Due to error '%s' - already created grafana "+
" dashboards, matching the same visualization, would be deleted",
grafanaUploadErr)
updateDashboardsDB := []*models.Dashboard{}
deleteDashboardsDB := []*models.Dashboard{}
for index, slugToDelete := range uploadedGrafanaSlugs {
grafanaDeletionErr := clients.Grafana.DeleteDashboard(slugToDelete, organizationID)
// if an already-created dashboard failed to delete, the
// corresponding db entry has to be updated with the grafana slug
// to guarantee consistency
if grafanaDeletionErr != nil {
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard deletion %s", grafanaDeletionErr)
dashboard := dashboardsDB[index]
dashboard.Slug = uploadedGrafanaSlugs[index]
updateDashboardsDB = append(
updateDashboardsDB, dashboard)
} else {
log.Logger.Debug("deleted dashboard from grafana")
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[index])
}
}
// Delete dashboards, that were not uploaded to grafana
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[len(uploadedGrafanaSlugs):]...)
if len(updateDashboardsDB) > 0 {
dashboardsToReturn := []*models.Dashboard{}
dashboardsToReturn = append(dashboardsToReturn, updateDashboardsDB...)
log.Logger.Debug("Updating db dashboards with grafana slugs")
updateErrorDB := clients.DatabaseManager.BulkUpdateDashboard(
updateDashboardsDB)
if updateErrorDB != nil {
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to update db entities of dashboards"+
" with slugs of corresponding grafana dashboards for"+
"dashboards not deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
log.Logger.Debug("Deleting db dashboards that are not uploaded" +
" to grafana")
deletionErrorDB := clients.DatabaseManager.BulkDeleteDashboard(
deleteDashboardsDB)
if deletionErrorDB != nil {
log.Logger.Debug("due to failed deletion operation - extend" +
" the slice of returned dashboards to user")
dashboardsToReturn = append(dashboardsToReturn, deleteDashboardsDB...)
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to delete entities of grafana "+
"dashboards deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
result := VisualizationDashboardToResponse(
visualizationDB, dashboardsToReturn)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("trying to delete visualization with " +
"corresponding dashboards from database. dashboards have no " +
"matching grafana uploads")
visualizationDeletionErr := clients.DatabaseManager.DeleteVisualization(
visualizationDB)
if visualizationDeletionErr != nil {
log.Logger.Error("Unable to delete visualization entry " +
"from db with corresponding dashboards entries. " +
"all entries are returned to user")
result := VisualizationDashboardToResponse(
visualizationDB, updateDashboardsDB)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("All created data was deleted both from grafana " +
"and from database without errors. original grafana error is returned")
return nil, grafanaUploadErr
}
log.Logger.Infof("Created dashboard named '%s'", slug)
uploadedGrafanaSlugs = append(uploadedGrafanaSlugs, slug)
}
log.Logger.Debug("Uploaded dashboard data to grafana")
// Positive outcome. All dashboards were created both in db and grafana
for index := range dashboardsDB {
dashboardsDB[index].Slug = uploadedGrafanaSlugs[index]
}
log.Logger.Debug("Updating db entries of dashboards with corresponding" +
" grafana slugs")
updateErrorDB := clients.DatabaseManager.BulkUpdateDashboard(dashboardsDB)
if updateErrorDB != nil {
log.Logger.Errorf("Error updating db dashboard slugs '%s'", updateErrorDB)
return nil, updateErrorDB
}
return VisualizationDashboardToResponse(visualizationDB, dashboardsDB), nil
}
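// A hedged usage sketch for VisualizationsPost (editorial example; the
// payload field names follow the loop above, but the dashboard element
// type name DashboardPOSTData is a guess):
//
//	payload := common.VisualizationPOSTData{
//		Name: "host-overview",
//		Tags: map[string]interface{}{"env": "prod"},
//		Dashboards: []common.DashboardPOSTData{{ // hypothetical type name
//			Name:               "cpu",
//			TemplateBody:       `{"title": "{{.Title}}"}`,
//			TemplateParameters: map[string]interface{}{"Title": "cpu"},
//		}},
//	}
//	result, err := handler.VisualizationsPost(clients, payload, orgID)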
// VisualizationDelete removes visualizations
func (h *V1Visualizations) | VisualizationDelete | identifier_name |
|
visualizations.go | izationWithDashboards {
// This function is used when rendering grouped visualizations with their
// dashboards into the response returned to the user
log.Logger.Debug("rendering data to user")
response := []common.VisualizationWithDashboards{}
for visualizationPtr, dashboards := range *data {
renderedVisualization := VisualizationDashboardToResponse(
&visualizationPtr, dashboards)
response = append(response, *renderedVisualization)
}
return &response
}
// VisualizationsGet handler queries visualizations
func (h *V1Visualizations) VisualizationsGet(clients *common.ClientContainer,
organizationID, name string, tags map[string]interface{}) (
*[]common.VisualizationWithDashboards, error) {
log.Logger.Debug("Querying data to user according to name and tags")
data, err := clients.DatabaseManager.QueryVisualizationsDashboards(
"", name, organizationID, tags)
if err != nil {
log.Logger.Errorf("Error getting data from db: '%s'", err)
return nil, err
}
return GroupedVisualizationDashboardToResponse(data), nil
}
func renderTemplates(templates []string, templateParamaters []interface{}) (
[]string, error) | if err != nil {
// something is wrong with rendering of user provided template
return nil, common.NewUserDataError(err.Error())
}
renderedTemplates = append(renderedTemplates, templateBuffer.String())
}
return renderedTemplates, nil
}
// VisualizationsPost handler creates new visualizations
func (h *V1Visualizations) VisualizationsPost(clients *common.ClientContainer,
data common.VisualizationPOSTData, organizationID string) (
*common.VisualizationWithDashboards, error) {
/*
1 - validate and render all golang templates provided by the user;
if there are any errors, immediately return an error to the user
2 - validate that the rendered templates match the grafana json structure;
if there is any mismatch, return an error to the user
3 - create db entries for the visualization and every dashboard
4 - for each validated template, upload it to grafana and store the
received slug for a future update of the dashboard db entry
5 - return data to the user
*/
log.Logger.Debug("Extracting names, templates, data from provided user data")
templates := []string{}
templateParamaters := []interface{}{}
dashboardNames := []string{}
for _, dashboardData := range data.Dashboards {
templates = append(templates, dashboardData.TemplateBody)
templateParamaters = append(templateParamaters, dashboardData.TemplateParameters)
dashboardNames = append(dashboardNames, dashboardData.Name)
}
log.Logger.Debug("Extracted names, templates, data from provided user data")
renderedTemplates, err := renderTemplates(templates, templateParamaters)
if err != nil {
return nil, err
}
// create db entries for visualizations and dashboards
log.Logger.Debug("Creating database entries for visualizations and dashboards")
visualizationDB, dashboardsDB, err := clients.DatabaseManager.CreateVisualizationsWithDashboards(
data.Name, organizationID, data.Tags, dashboardNames, renderedTemplates)
log.Logger.Debug("Created database entries for visualizations and dashboards")
if err != nil {
return nil, err
}
/*
Here we face a consistency problem. We cannot guarantee that data
stored in the database will be successfully mirrored in grafana, due to
possible errors on the grafana side (service down, etc.). At the same
time we cannot guarantee that data created in grafana will be
successfully stored in the db.
To resolve this kind of issue, the following approach is taken: the
highest priority is given to the database data.
That means that creation of a visualization happens in 3 steps
1 - create database entries for the visualization and all dashboards.
The grafana slug field is left empty
2 - create grafana entries via the grafana api, getting slugs as the result
3 - update the database entries with the grafana slugs
*/
uploadedGrafanaSlugs := []string{}
log.Logger.Debug("Uploading dashboard data to grafana")
for _, renderedTemplate := range renderedTemplates {
slug, grafanaUploadErr := clients.Grafana.UploadDashboard(
[]byte(renderedTemplate), organizationID, false)
if grafanaUploadErr != nil {
// We cannot create a grafana dashboard using the user-provided template
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard upload %s", grafanaUploadErr)
log.Logger.Debugf("Due to error '%s' - already created grafana "+
" dashboards, matching the same visualization, would be deleted",
grafanaUploadErr)
updateDashboardsDB := []*models.Dashboard{}
deleteDashboardsDB := []*models.Dashboard{}
for index, slugToDelete := range uploadedGrafanaSlugs {
grafanaDeletionErr := clients.Grafana.DeleteDashboard(slugToDelete, organizationID)
// if an already-created dashboard failed to delete, the
// corresponding db entry has to be updated with the grafana slug
// to guarantee consistency
if grafanaDeletionErr != nil {
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard deletion %s", grafanaDeletionErr)
dashboard := dashboardsDB[index]
dashboard.Slug = uploadedGrafanaSlugs[index]
updateDashboardsDB = append(
updateDashboardsDB, dashboard)
} else {
log.Logger.Debug("deleted dashboard from grafana")
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[index])
}
}
// Delete dashboards, that were not uploaded to grafana
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[len(uploadedGrafanaSlugs):]...)
if len(updateDashboardsDB) > 0 {
dashboardsToReturn := []*models.Dashboard{}
dashboardsToReturn = append(dashboardsToReturn, updateDashboardsDB...)
log.Logger.Debug("Updating db dashboards with grafana slugs")
updateErrorDB := clients.DatabaseManager.BulkUpdateDashboard(
updateDashboardsDB)
if updateErrorDB != nil {
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to update db entities of dashboards"+
" with slugs of corresponding grafana dashboards for"+
"dashboards not deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
log.Logger.Debug("Deleting db dashboards that are not uploaded" +
" to grafana")
deletionErrorDB := clients.DatabaseManager.BulkDeleteDashboard(
deleteDashboardsDB)
if deletionErrorDB != nil {
log.Logger.Debug("due to failed deletion operation - extend" +
" the slice of returned dashboards to user")
dashboardsToReturn = append(dashboardsToReturn, deleteDashboardsDB...)
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to delete entities of grafana "+
"dashboards deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
result := VisualizationDashboardToResponse(
visualizationDB, dashboardsToReturn)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("trying to delete visualization with " +
"corresponding dashboards from database. dashboards have no " +
"matching grafana uploads")
visualizationDeletionErr := clients.DatabaseManager.DeleteVisualization(
visualizationDB)
if visualizationDeletionErr != nil {
log.Logger.Error("Unable to delete visualization entry " +
"from db with corresponding dashboards entries. " +
"all entries are returned to user")
result := VisualizationDashboardToResponse(
visualizationDB, updateDashboardsDB)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("All created data was deleted both from grafana " +
"and from database without errors. original grafana error is returned")
return nil, grafanaUploadErr
}
log.Logger.Infof("Created dashboard named '%s'", slug)
uploadedGrafanaSlugs = append(uploadedGrafanaSlugs, slug)
}
log.Logger.Debug("Uploaded dashboard data to grafana")
// Positive outcome. All dashboards were created both in db and | {
// this function takes Visualization data and returns rendered templates
log.Logger.Debug("Rendering golang templates")
renderedTemplates := []string{}
for index := range templates {
// validate that golang template is valid
// "missingkey=error" would return error, if user did not provide
// all parameters for his own template
tmpl, err := template.New("").Option(
"missingkey=error").Parse(templates[index])
if err != nil {
// something is wrong with structure of user provided template
return nil, common.NewUserDataError(
fmt.Sprintf("ErrorMsg: '%s', TemplateIndex: '%d'",
err.Error(), index))
}
// render golang template with user provided arguments to buffer
templateBuffer := new(bytes.Buffer)
err = tmpl.Execute(templateBuffer, templateParamaters[index]) | identifier_body |
visualizations.go | missingkey=error").Parse(templates[index])
if err != nil {
// something is wrong with structure of user provided template
return nil, common.NewUserDataError(
fmt.Sprintf("ErrorMsg: '%s', TemplateIndex: '%d'",
err.Error(), index))
}
// render golang template with user provided arguments to buffer
templateBuffer := new(bytes.Buffer)
err = tmpl.Execute(templateBuffer, templateParamaters[index])
if err != nil {
// something is wrong with rendering of user provided template
return nil, common.NewUserDataError(err.Error())
}
renderedTemplates = append(renderedTemplates, templateBuffer.String())
}
return renderedTemplates, nil
}
// VisualizationsPost handler creates new visualizations
func (h *V1Visualizations) VisualizationsPost(clients *common.ClientContainer,
data common.VisualizationPOSTData, organizationID string) (
*common.VisualizationWithDashboards, error) {
/*
1 - validate and render all golang templates provided by user,
if there are any errors, then immediately return error to user
2 - validate that rendered templates matches grafana json structure
if there are any mismatch - return error to user
3 - create db entry for visualization and every dashboard.
4 - for each validated template - upload it to grafana, store received
slug for future update of dashboard db entry
5 - return data to user
*/
log.Logger.Debug("Extracting names, templates, data from provided user data")
templates := []string{}
templateParamaters := []interface{}{}
dashboardNames := []string{}
for _, dashboardData := range data.Dashboards {
templates = append(templates, dashboardData.TemplateBody)
templateParamaters = append(templateParamaters, dashboardData.TemplateParameters)
dashboardNames = append(dashboardNames, dashboardData.Name)
}
log.Logger.Debug("Extracted names, templates, data from provided user data")
renderedTemplates, err := renderTemplates(templates, templateParamaters)
if err != nil {
return nil, err
}
// create db entries for visualizations and dashboards
log.Logger.Debug("Creating database entries for visualizations and dashboards")
visualizationDB, dashboardsDB, err := clients.DatabaseManager.CreateVisualizationsWithDashboards(
data.Name, organizationID, data.Tags, dashboardNames, renderedTemplates)
log.Logger.Debug("Created database entries for visualizations and dashboards")
if err != nil {
return nil, err
}
/*
Here we face a consistency problem. We cannot guarantee that data
stored in the database will be successfully mirrored in grafana, due to
possible errors on the grafana side (service down, etc.). At the same
time we cannot guarantee that data created in grafana will be
successfully stored in the db.
To resolve this kind of issue, the following approach is taken: the
highest priority is given to the database data.
That means that creation of a visualization happens in 3 steps
1 - create database entries for the visualization and all dashboards.
The grafana slug field is left empty
2 - create grafana entries via the grafana api, getting slugs as the result
3 - update the database entries with the grafana slugs
*/
uploadedGrafanaSlugs := []string{}
log.Logger.Debug("Uploading dashboard data to grafana")
for _, renderedTemplate := range renderedTemplates {
slug, grafanaUploadErr := clients.Grafana.UploadDashboard(
[]byte(renderedTemplate), organizationID, false)
if grafanaUploadErr != nil {
// We cannot create a grafana dashboard using the user-provided template
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard upload %s", grafanaUploadErr)
log.Logger.Debugf("Due to error '%s' - already created grafana "+
" dashboards, matching the same visualization, would be deleted",
grafanaUploadErr)
updateDashboardsDB := []*models.Dashboard{}
deleteDashboardsDB := []*models.Dashboard{}
for index, slugToDelete := range uploadedGrafanaSlugs {
grafanaDeletionErr := clients.Grafana.DeleteDashboard(slugToDelete, organizationID)
// if an already-created dashboard failed to delete, the
// corresponding db entry has to be updated with the grafana slug
// to guarantee consistency
if grafanaDeletionErr != nil {
log.Logger.Errorf("Error during performing grafana call "+
" for dashboard deletion %s", grafanaDeletionErr)
dashboard := dashboardsDB[index]
dashboard.Slug = uploadedGrafanaSlugs[index]
updateDashboardsDB = append(
updateDashboardsDB, dashboard)
} else {
log.Logger.Debug("deleted dashboard from grafana")
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[index])
}
}
// Delete dashboards, that were not uploaded to grafana
deleteDashboardsDB = append(deleteDashboardsDB,
dashboardsDB[len(uploadedGrafanaSlugs):]...)
if len(updateDashboardsDB) > 0 {
dashboardsToReturn := []*models.Dashboard{}
dashboardsToReturn = append(dashboardsToReturn, updateDashboardsDB...)
log.Logger.Debug("Updating db dashboards with grafana slugs")
updateErrorDB := clients.DatabaseManager.BulkUpdateDashboard(
updateDashboardsDB)
if updateErrorDB != nil {
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to update db entities of dashboards"+
" with slugs of corresponding grafana dashboards for"+
"dashboards not deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
log.Logger.Debug("Deleting db dashboards that are not uploaded" +
" to grafana")
deletionErrorDB := clients.DatabaseManager.BulkDeleteDashboard(
deleteDashboardsDB)
if deletionErrorDB != nil {
log.Logger.Debug("due to failed deletion operation - extend" +
" the slice of returned dashboards to user")
dashboardsToReturn = append(dashboardsToReturn, deleteDashboardsDB...)
log.Logger.Errorf("Error during cleanup on grafana upload"+
" error '%s'. Unable to delete entities of grafana "+
"dashboards deleted from grafana '%s'",
grafanaUploadErr, updateErrorDB)
}
result := VisualizationDashboardToResponse(
visualizationDB, dashboardsToReturn)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("trying to delete visualization with " +
"corresponding dashboards from database. dashboards have no " +
"matching grafana uploads")
visualizationDeletionErr := clients.DatabaseManager.DeleteVisualization(
visualizationDB)
if visualizationDeletionErr != nil {
log.Logger.Error("Unable to delete visualization entry " +
"from db with corresponding dashboards entries. " +
"all entries are returned to user")
result := VisualizationDashboardToResponse(
visualizationDB, updateDashboardsDB)
return result, common.NewClientError(
"Unable to create new grafana dashboards, and remove old ones")
}
log.Logger.Debug("All created data was deleted both from grafana " +
"and from database without errors. original grafana error is returned")
return nil, grafanaUploadErr
}
log.Logger.Infof("Created dashboard named '%s'", slug)
uploadedGrafanaSlugs = append(uploadedGrafanaSlugs, slug)
}
log.Logger.Debug("Uploaded dashboard data to grafana")
// Positive outcome. All dashboards were created both in db and grafana
for index := range dashboardsDB {
dashboardsDB[index].Slug = uploadedGrafanaSlugs[index]
}
log.Logger.Debug("Updating db entries of dashboards with corresponding" +
" grafana slugs")
updateErrorDB := clients.DatabaseManager.BulkUpdateDashboard(dashboardsDB)
if updateErrorDB != nil {
log.Logger.Errorf("Error updating db dashboard slugs '%s'", updateErrorDB)
return nil, updateErrorDB
}
return VisualizationDashboardToResponse(visualizationDB, dashboardsDB), nil
}
// VisualizationDelete removes visualizations
func (h *V1Visualizations) VisualizationDelete(clients *common.ClientContainer,
organizationID, visualizationSlug string) (
*common.VisualizationWithDashboards, error) {
log.Logger.Debug("getting data from db matching provided string")
visualizationDB, dashboardsDB, err := clients.DatabaseManager.GetVisualizationWithDashboardsBySlug(
visualizationSlug, organizationID)
log.Logger.Debug("got data from db matching provided string")
if err != nil {
log.Logger.Errorf("Error getting data from db: '%s'", err)
return nil, err
}
if visualizationDB == nil {
log.Logger.Errorf("User requested visualization '%s' not found in db", visualizationSlug)
return nil, common.NewUserDataError("No visualizations found")
}
removedDashboardsFromGrafana := []*models.Dashboard{}
failedToRemoveDashboardsFromGrafana := []*models.Dashboard{} | for index, dashboardDB := range dashboardsDB { | random_line_split |
|
consumers.go | ColonOrDeclare() Token {
t := Token{
Type: Colon,
Value: string(Colon),
Column: l.Column,
Line: l.Line,
}
l.move()
// check if it is a `:=`
if next, _ := l.peek(); next == '=' {
t.Type = Declare
t.Value = `:=`
l.move()
}
return t
}
// recognizeOperator consumes an operator token
func (l *Lexer) recognizeOperator() Token {
c := l.getCurr()
if isArithmeticOperator(c) || isBitOperator(c) || c == '!' {
t := l.consumeArithmeticOrBitOperator()
if t.Type == Unknown && isBoolOperator(c) {
return l.consumableBoolOperator()
}
return t
}
// attempt to consume shift operator
if beginsBitShift(c) {
if t := l.consumeBitShiftOperator(); t.Type != Unknown {
return t
}
}
// if it isn't arithmetic, bit or boolean then it is comparison
return l.consumeComparisonOperator()
}
// consumebitShiftOperator consumes a bit shifting operator
func (l *Lexer) consumeBitShiftOperator() Token {
c := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
}
switch c {
case '<':
t.Type = BitLeftShift
t.Value = string(BitLeftShift)
case '>':
t.Type = BitRightShift
t.Value = string(BitRightShift)
default:
return l.getUnknownToken(string(c))
}
// consume first token
l.move()
// if the current and next tokens aren't the same,
// then it can't be a bit shift (<< or >>)
if next, _ := l.peek(); c != next {
t = UnknownToken(string(next), l.Line, l.Column)
l.retract()
return t
}
// consume second token
l.move()
return t
}
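// Behaviour sketch (assumed) for consumeBitShiftOperator: on "<<" both
// runes are consumed and a BitLeftShift token is returned; on "<-" the
// second rune does not match, so the lexer retracts and reports Unknown,
// letting recognizeOperator fall through to consumeComparisonOperator.
//
//	l := NewLexer("<<") // NewLexer is a hypothetical constructor
//	t := l.consumeBitShiftOperator()
//	// t.Type == BitLeftShift, t.Value == "<<"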
// consumeArithmeticOrBitOperator consumes an arithmetic or bit operator token
func (l *Lexer) consumeArithmeticOrBitOperator() Token {
op := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
Value: string(op),
}
l.move()
next, _ := l.peek()
if next == '=' | return l.getUnknownToken(string(op))
}
// consume equals sign
t.Value = string(op) + "="
l.move()
return t
}
if !isBoolOperator(next) {
switch op {
case '+':
t.Type = Plus
// check if increment and consume
if next == '+' {
t.Type = Increment
t.Value = "++"
l.move()
}
case '-':
t.Type = Minus
// check if decrement and consume
if next == '-' {
t.Type = Decrement
t.Value = "--"
l.move()
}
case '/':
t.Type = Div
case '*':
t.Type = Times
case '%':
t.Type = Mod
case '&':
t.Type = BitAnd
case '|':
t.Type = BitOr
case '^':
t.Type = BitXor
case '~':
t.Type = BitNot
default:
l.retract()
return l.getUnknownToken(string(op))
}
return t
}
l.retract()
return l.getUnknownToken(string(next))
}
// consumableBoolOperator consumes a bool operator token
func (l *Lexer) consumableBoolOperator() Token {
t := Token{
Column: l.Column,
Line: l.Line,
}
c := l.getCurr()
l.move()
next, _ := l.peek()
if c != '!' && c != next {
return l.getUnknownToken(string(next))
}
switch c {
case '&':
t.Type = And
t.Value = string(And)
case '|':
t.Type = Or
t.Value = string(Or)
case '!':
if next == '=' {
t.Type = NotEqual
t.Value = string(NotEqual)
} else {
t.Type = Not
t.Value = string(Not)
}
}
if t.Value != `!` {
l.move()
}
return t
}
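// Behaviour sketch (assumed) for consumableBoolOperator: "&&" and "||"
// consume both runes; a lone '!' consumes one rune and yields Not, while
// "!=" also absorbs the '=' and yields NotEqual.
//
//	l := NewLexer("!=") // hypothetical constructor
//	t := l.consumableBoolOperator()
//	// t.Type == NotEqual, t.Value == "!="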
// consumeComparisonOperator consumes an operator token
func (l *Lexer) consumeComparisonOperator() Token {
t := Token{
Column: l.Column,
Line: l.Line,
}
char := l.getCurr()
hasEquals := false
if l.position+1 < len(l.input) {
// copy next rune
cpy := l.input[l.position+1]
// move cursor to accommodate '='
if cpy == '=' {
hasEquals = true
l.move()
}
}
switch char {
case '<':
if hasEquals {
t.Type = LessThanOrEqual
t.Value = "<="
} else {
t.Type = LessThan
t.Value = "<"
}
case '>':
if hasEquals {
t.Type = GreaterThanOrEqual
t.Value = ">="
} else {
t.Type = GreaterThan
t.Value = ">"
}
case '=':
if hasEquals {
t.Type = Equal
t.Value = "=="
} else {
t.Type = Assign
t.Value = "="
}
}
l.move()
return t
}
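// A worked example (assumed) for consumeComparisonOperator on "<=": the
// lookahead sees '=', the cursor moves once to absorb it, and the final
// l.move() steps past the remaining rune, leaving the cursor after "<=".
//
//	l := NewLexer("<=") // hypothetical constructor
//	t := l.consumeComparisonOperator()
//	// t.Type == LessThanOrEqual, t.Value == "<="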
func (l *Lexer) recognizeLiteral() Token {
b := l.getCurr()
if beginsIdentifier(b) {
return l.consumeIdentifierOrKeyword()
}
if beginsNumber(b) {
if t := l.consumeNumber(); t.Type != Unknown {
return t
}
// if it began with a number literal, it is likely a dot
return l.consumeDots()
}
if beginsString(b) {
return l.consumeString()
}
if beginsRune(b) {
return l.consumeRune()
}
return UnknownToken(string(b), l.Line, l.Column)
}
// consumeIdentifierOrKeyword recognizes an identifier or a keyword
func (l *Lexer) consumeIdentifierOrKeyword() Token {
word := l.getNextWord(isValidIdentifierChar)
defer func() {
l.position += len(word)
l.Column += len(word)
}()
if t := l.consumableKeyword(word); t.Type != Unknown {
return t
}
Type := Identifier
if word == `_` {
Type = Underscore
}
col := l.Column
// check for colon after identifier
if next := len(word) + l.position; next < len(l.input) && l.input[next] == ':' {
Type = LoopName
l.move()
}
return Token{
Type: Type,
Value: word,
Column: col,
Line: l.Line,
}
}
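// Classification sketch (assumed) for consumeIdentifierOrKeyword: the
// next word is read with isValidIdentifierChar, checked against the
// keyword table first, and an identifier immediately followed by ':'
// becomes a LoopName.
//
//	// "outer:" → Token{Type: LoopName, Value: "outer"}
//	// "_"      → Token{Type: Underscore, Value: "_"}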
// consumableKeyword returns a keyword/unknown token which can be consumed
// this also consumes true/false literals
func (l *Lexer) consumableKeyword(word string) Token {
t := Token{
Value: word,
Column: l.Column,
Line: l.Line,
}
keyword := TokenType(word)
if keyword == `true` || keyword == `false` {
t.Type = Bool
} else if _, ok := keywords[keyword]; ok {
t.Type = keyword
} else {
t.Type = Unknown
}
return t
}
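// Dispatch sketch (assumed) for consumableKeyword: "true"/"false" map to
// Bool, entries of the keywords table keep their own TokenType, and any
// other word comes back Unknown so the caller treats it as an identifier.
//
//	t := l.consumableKeyword("true") // t.Type == Bool
//	t = l.consumableKeyword("if")    // t.Type == TokenType("if"), assuming "if" is in the keywords table
//	t = l.consumableKeyword("foo")   // t.Type == Unknown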
// consumeDots consumes a dot or dots token
func (l *Lexer) consumeDots() Token {
t := Token{
Type: Dot,
Value: string(Dot),
Line: l.Line,
Column: l.Column,
}
l.move()
// check for potential second dot to form two dots
if next, _ := l.peek(); isDot(next) {
t.Type = TwoDots
t.Value = string(TwoDots)
l.move()
// check for potential third dot to form ellipsis
if next, _ = l.peek(); isDot(next) {
t.Type = Ellipsis
t.Value = string(Ellipsis)
l.move()
}
}
return t
}
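// Illustration (assumed) of consumeDots: "." yields Dot, ".." yields
// TwoDots, and "..." yields Ellipsis, advancing the cursor one rune per
// consumed dot.
//
//	for _, src := range []string{".", "..", "..."} {
//		l := NewLexer(src) // hypothetical constructor
//		fmt.Println(l.consumeDots().Type)
//	}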
// consumeRune consumes a rune token
func (l *Lexer) consumeRune() Token {
// consume_quote returns an empty Token and true if a quote
// can be consumed; otherwise it returns an unknown token and false
consume_quote := func() (Token, bool) {
if b, ok := l.peek(); !ok || b != '\'' {
col := l.Column
l.move()
if !ok {
return UnknownToken(``, l.Line, col), false
}
return UnknownToken(string(b), l.Line, col), false
}
return Token{}, true
}
if t, ok := consume_quote(); !ok {
return | {
switch op {
case '+':
t.Type = PlusEq
case '-':
t.Type = MinusEq
case '/':
t.Type = DivEq
case '*':
t.Type = TimesEq
case '%':
t.Type = ModEq
case '&':
t.Type = BitAndEq
case '|':
t.Type = BitOrEq
case '^':
t.Type = BitXorEq
default:
l.retract() | conditional_block |
consumers.go | OrDeclare() Token {
t := Token{
Type: Colon,
Value: string(Colon),
Column: l.Column,
Line: l.Line,
}
l.move()
// check if it is a `:=`
if next, _ := l.peek(); next == '=' {
t.Type = Declare
t.Value = `:=`
l.move()
}
return t
}
// recognizeOperator consumes an operator token
func (l *Lexer) | () Token {
c := l.getCurr()
if isArithmeticOperator(c) || isBitOperator(c) || c == '!' {
t := l.consumeArithmeticOrBitOperator()
if t.Type == Unknown && isBoolOperator(c) {
return l.consumableBoolOperator()
}
return t
}
// attempt to consume shift operator
if beginsBitShift(c) {
if t := l.consumeBitShiftOperator(); t.Type != Unknown {
return t
}
}
// if it isn't arithmetic, bit or boolean then it is comparison
return l.consumeComparisonOperator()
}
// consumeBitShiftOperator consumes a bit-shifting operator
func (l *Lexer) consumeBitShiftOperator() Token {
c := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
}
switch c {
case '<':
t.Type = BitLeftShift
t.Value = string(BitLeftShift)
case '>':
t.Type = BitRightShift
t.Value = string(BitRightShift)
default:
return l.getUnknownToken(string(c))
}
// consume first token
l.move()
// if the current and next tokens aren't the same,
// then it can't be a bit shift (<< or >>)
if next, _ := l.peek(); c != next {
t = UnknownToken(string(next), l.Line, l.Column)
l.retract()
return t
}
// consume second token
l.move()
return t
}
// consumeArithmeticOrBitOperator consumes an arithmetic or bit operator token
func (l *Lexer) consumeArithmeticOrBitOperator() Token {
op := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
Value: string(op),
}
l.move()
next, _ := l.peek()
if next == '=' {
switch op {
case '+':
t.Type = PlusEq
case '-':
t.Type = MinusEq
case '/':
t.Type = DivEq
case '*':
t.Type = TimesEq
case '%':
t.Type = ModEq
case '&':
t.Type = BitAndEq
case '|':
t.Type = BitOrEq
case '^':
t.Type = BitXorEq
default:
l.retract()
return l.getUnknownToken(string(op))
}
// consume equals sign
t.Value = string(op) + "="
l.move()
return t
}
if !isBoolOperator(next) {
switch op {
case '+':
t.Type = Plus
// check if increment and consume
if next == '+' {
t.Type = Increment
t.Value = "++"
l.move()
}
case '-':
t.Type = Minus
// check if decrement and consume
if next == '-' {
t.Type = Decrement
t.Value = "--"
l.move()
}
case '/':
t.Type = Div
case '*':
t.Type = Times
case '%':
t.Type = Mod
case '&':
t.Type = BitAnd
case '|':
t.Type = BitOr
case '^':
t.Type = BitXor
case '~':
t.Type = BitNot
default:
l.retract()
return l.getUnknownToken(string(op))
}
return t
}
l.retract()
return l.getUnknownToken(string(next))
}
// consumableBoolOperator consumes a bool operator token
func (l *Lexer) consumableBoolOperator() Token {
t := Token{
Column: l.Column,
Line: l.Line,
}
c := l.getCurr()
l.move()
next, _ := l.peek()
if c != '!' && c != next {
return l.getUnknownToken(string(next))
}
switch c {
case '&':
t.Type = And
t.Value = string(And)
case '|':
t.Type = Or
t.Value = string(Or)
case '!':
if next == '=' {
t.Type = NotEqual
t.Value = string(NotEqual)
} else {
t.Type = Not
t.Value = string(Not)
}
}
if t.Value != `!` {
l.move()
}
return t
}
// consumeComparisonOperator consumes an operator token
func (l *Lexer) consumeComparisonOperator() Token {
t := Token{
Column: l.Column,
Line: l.Line,
}
char := l.getCurr()
hasEquals := false
if l.position+1 < len(l.input) {
// copy next rune
cpy := l.input[l.position+1]
// move cursor to accommodate '='
if cpy == '=' {
hasEquals = true
l.move()
}
}
switch char {
case '<':
if hasEquals {
t.Type = LessThanOrEqual
t.Value = "<="
} else {
t.Type = LessThan
t.Value = "<"
}
case '>':
if hasEquals {
t.Type = GreaterThanOrEqual
t.Value = ">="
} else {
t.Type = GreaterThan
t.Value = ">"
}
case '=':
if hasEquals {
t.Type = Equal
t.Value = "=="
} else {
t.Type = Assign
t.Value = "="
}
}
l.move()
return t
}
func (l *Lexer) recognizeLiteral() Token {
b := l.getCurr()
if beginsIdentifier(b) {
return l.consumeIdentifierOrKeyword()
}
if beginsNumber(b) {
if t := l.consumeNumber(); t.Type != Unknown {
return t
}
// if it began with a number literal, it is likely a dot
return l.consumeDots()
}
if beginsString(b) {
return l.consumeString()
}
if beginsRune(b) {
return l.consumeRune()
}
return UnknownToken(string(b), l.Line, l.Column)
}
// consumeIdentifierOrKeyword recognizes an identifier or a keyword
func (l *Lexer) consumeIdentifierOrKeyword() Token {
word := l.getNextWord(isValidIdentifierChar)
defer func() {
l.position += len(word)
l.Column += len(word)
}()
if t := l.consumableKeyword(word); t.Type != Unknown {
return t
}
Type := Identifier
if word == `_` {
Type = Underscore
}
col := l.Column
// check for colon after identifier
if next := len(word) + l.position; next < len(l.input) && l.input[next] == ':' {
Type = LoopName
l.move()
}
return Token{
Type: Type,
Value: word,
Column: col,
Line: l.Line,
}
}
// consumableKeyword returns a keyword/unknown token which can be consumed
// this also consumes true/false literals
func (l *Lexer) consumableKeyword(word string) Token {
t := Token{
Value: word,
Column: l.Column,
Line: l.Line,
}
keyword := TokenType(word)
if keyword == `true` || keyword == `false` {
t.Type = Bool
} else if _, ok := keywords[keyword]; ok {
t.Type = keyword
} else {
t.Type = Unknown
}
return t
}
// consumeDots consumes a dot or dots token
func (l *Lexer) consumeDots() Token {
t := Token{
Type: Dot,
Value: string(Dot),
Line: l.Line,
Column: l.Column,
}
l.move()
// check for potential second dot to form two dots
if next, _ := l.peek(); isDot(next) {
t.Type = TwoDots
t.Value = string(TwoDots)
l.move()
// check for potential third dot to form ellipsis
if next, _ = l.peek(); isDot(next) {
t.Type = Ellipsis
t.Value = string(Ellipsis)
l.move()
}
}
return t
}
// consumeRune consumes a rune token
func (l *Lexer) consumeRune() Token {
// consume_quote returns an empty Token and true if a quote
// can be consumed; otherwise it returns an unknown token and false
consume_quote := func() (Token, bool) {
if b, ok := l.peek(); !ok || b != '\'' {
col := l.Column
l.move()
if !ok {
return UnknownToken(``, l.Line, col), false
}
return UnknownToken(string(b), l.Line, col), false
}
return Token{}, true
}
if t, ok := consume_quote(); !ok {
return | recognizeOperator | identifier_name |
consumers.go | arithmetic, bit or boolean then it is comparison
return l.consumeComparisonOperator()
}
// consumeBitShiftOperator consumes a bit-shifting operator
func (l *Lexer) consumeBitShiftOperator() Token {
c := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
}
switch c {
case '<':
t.Type = BitLeftShift
t.Value = string(BitLeftShift)
case '>':
t.Type = BitRightShift
t.Value = string(BitRightShift)
default:
return l.getUnknownToken(string(c))
}
// consume first token
l.move()
// if the current and next tokens aren't the same,
// then it can't be a bit shift (<< or >>)
if next, _ := l.peek(); c != next {
t = UnknownToken(string(next), l.Line, l.Column)
l.retract()
return t
}
// consume second token
l.move()
return t
}
// consumeArithmeticOrBitOperator consumes an arithmetic or bit operator token
func (l *Lexer) consumeArithmeticOrBitOperator() Token {
op := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
Value: string(op),
}
l.move()
next, _ := l.peek()
if next == '=' {
switch op {
case '+':
t.Type = PlusEq
case '-':
t.Type = MinusEq
case '/':
t.Type = DivEq
case '*':
t.Type = TimesEq
case '%':
t.Type = ModEq
case '&':
t.Type = BitAndEq
case '|':
t.Type = BitOrEq
case '^':
t.Type = BitXorEq
default:
l.retract()
return l.getUnknownToken(string(op))
}
// consume equals sign
t.Value = string(op) + "="
l.move()
return t
}
if !isBoolOperator(next) {
switch op {
case '+':
t.Type = Plus
// check if increment and consume
if next == '+' {
t.Type = Increment
t.Value = "++"
l.move()
}
case '-':
t.Type = Minus
// check if decrement and consume
if next == '-' {
t.Type = Decrement
t.Value = "--"
l.move()
}
case '/':
t.Type = Div
case '*':
t.Type = Times
case '%':
t.Type = Mod
case '&':
t.Type = BitAnd
case '|':
t.Type = BitOr
case '^':
t.Type = BitXor
case '~':
t.Type = BitNot
default:
l.retract()
return l.getUnknownToken(string(op))
}
return t
}
l.retract()
return l.getUnknownToken(string(next))
}
// consumableBoolOperator consumes a bool operator token
func (l *Lexer) consumableBoolOperator() Token {
t := Token{
Column: l.Column,
Line: l.Line,
}
c := l.getCurr()
l.move()
next, _ := l.peek()
if c != '!' && c != next {
return l.getUnknownToken(string(next))
}
switch c {
case '&':
t.Type = And
t.Value = string(And)
case '|':
t.Type = Or
t.Value = string(Or)
case '!':
if next == '=' {
t.Type = NotEqual
t.Value = string(NotEqual)
} else {
t.Type = Not
t.Value = string(Not)
}
}
if t.Value != `!` {
l.move()
}
return t
}
// consumeComparisonOperator consumes an operator token
func (l *Lexer) consumeComparisonOperator() Token {
t := Token{
Column: l.Column,
Line: l.Line,
}
char := l.getCurr()
hasEquals := false
if l.position+1 < len(l.input) {
// copy next rune
cpy := l.input[l.position+1]
// move cursor to accommodate '='
if cpy == '=' {
hasEquals = true
l.move()
}
}
switch char {
case '<':
if hasEquals {
t.Type = LessThanOrEqual
t.Value = "<="
} else {
t.Type = LessThan
t.Value = "<"
}
case '>':
if hasEquals {
t.Type = GreaterThanOrEqual
t.Value = ">="
} else {
t.Type = GreaterThan
t.Value = ">"
}
case '=':
if hasEquals {
t.Type = Equal
t.Value = "=="
} else {
t.Type = Assign
t.Value = "="
}
}
l.move()
return t
}
func (l *Lexer) recognizeLiteral() Token {
b := l.getCurr()
if beginsIdentifier(b) {
return l.consumeIdentifierOrKeyword()
}
if beginsNumber(b) {
if t := l.consumeNumber(); t.Type != Unknown {
return t
}
// if it began with a number literal, it is likely a dot
return l.consumeDots()
}
if beginsString(b) {
return l.consumeString()
}
if beginsRune(b) {
return l.consumeRune()
}
return UnknownToken(string(b), l.Line, l.Column)
}
// consumeIdentifierOrKeyword recognizes an identifier or a keyword
func (l *Lexer) consumeIdentifierOrKeyword() Token {
word := l.getNextWord(isValidIdentifierChar)
defer func() {
l.position += len(word)
l.Column += len(word)
}()
if t := l.consumableKeyword(word); t.Type != Unknown {
return t
}
Type := Identifier
if word == `_` {
Type = Underscore
}
col := l.Column
// check for colon after identifier
if next := len(word) + l.position; next < len(l.input) && l.input[next] == ':' {
Type = LoopName
l.move()
}
return Token{
Type: Type,
Value: word,
Column: col,
Line: l.Line,
}
}
// consumableKeyword returns a keyword/unknown token which can be consumed
// this also consumes true/false literals
func (l *Lexer) consumableKeyword(word string) Token {
t := Token{
Value: word,
Column: l.Column,
Line: l.Line,
}
keyword := TokenType(word)
if keyword == `true` || keyword == `false` {
t.Type = Bool
} else if _, ok := keywords[keyword]; ok {
t.Type = keyword
} else {
t.Type = Unknown
}
return t
}
// consumeDots consumes a dot or dots token
func (l *Lexer) consumeDots() Token {
t := Token{
Type: Dot,
Value: string(Dot),
Line: l.Line,
Column: l.Column,
}
l.move()
// check for potential second dot to form two dots
if next, _ := l.peek(); isDot(next) {
t.Type = TwoDots
t.Value = string(TwoDots)
l.move()
// check for potential third dot to form ellipsis
if next, _ = l.peek(); isDot(next) {
t.Type = Ellipsis
t.Value = string(Ellipsis)
l.move()
}
}
return t
}
// consumeRune consumes a rune token
func (l *Lexer) consumeRune() Token {
// consume_quote returns an empty Token and true if a quote
// can be consumed; otherwise it returns an unknown token and false
consume_quote := func() (Token, bool) {
if b, ok := l.peek(); !ok || b != '\'' {
col := l.Column
l.move()
if !ok {
return UnknownToken(``, l.Line, col), false
}
return UnknownToken(string(b), l.Line, col), false
}
return Token{}, true
}
if t, ok := consume_quote(); !ok {
return t
}
var value bytes.Buffer
// consume opening quote
l.move()
// check character
c, ok := l.peek()
if !ok {
col := l.Column
l.move()
return UnknownToken(``, l.Line, col)
}
col := l.Column
// consume escape character if one exists
if c == '\\' {
value.WriteByte('\\')
l.move()
if c, ok = l.peek(); !ok {
l.move()
return l.getUnknownToken(``)
}
// TODO: check valid escapes
}
// write character
value.WriteRune(c)
// consume character
l.move()
if t, ok := consume_quote(); !ok {
return t
}
// consume closing quote
l.move()
return Token{
Column: col,
Line: l.Line,
Type: Rune,
Value: value.String(),
}
} |
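// A worked example (assumed) for consumeRune on the input "'a'": the
// opening quote is consumed, 'a' is buffered and consumed, the closing
// quote is consumed, and the result is Token{Type: Rune, Value: "a"}
// with Column pointing at the character between the quotes. For an
// escaped rune such as '\n', the backslash is kept in Value ("\\n").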
func (l *Lexer) consumeString() Token {
nextState := &nextStringState | random_line_split |
|
consumers.go | ColonOrDeclare() Token {
t := Token{
Type: Colon,
Value: string(Colon),
Column: l.Column,
Line: l.Line,
}
l.move()
// check if it is a `:=`
if next, _ := l.peek(); next == '=' {
t.Type = Declare
t.Value = `:=`
l.move()
}
return t
}
// recognizeOperator consumes an operator token
func (l *Lexer) recognizeOperator() Token {
c := l.getCurr()
if isArithmeticOperator(c) || isBitOperator(c) || c == '!' {
t := l.consumeArithmeticOrBitOperator()
if t.Type == Unknown && isBoolOperator(c) {
return l.consumableBoolOperator()
}
return t
}
// attempt to consume shift operator
if beginsBitShift(c) {
if t := l.consumeBitShiftOperator(); t.Type != Unknown {
return t
}
}
// if it isn't arithmetic, bit or boolean then it is comparison
return l.consumeComparisonOperator()
}
// consumeBitShiftOperator consumes a bit-shifting operator
func (l *Lexer) consumeBitShiftOperator() Token {
c := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
}
switch c {
case '<':
t.Type = BitLeftShift
t.Value = string(BitLeftShift)
case '>':
t.Type = BitRightShift
t.Value = string(BitRightShift)
default:
return l.getUnknownToken(string(c))
}
// consume first token
l.move()
// if the current and next tokens aren't the same,
// then it can't be a bit shift (<< or >>)
if next, _ := l.peek(); c != next {
t = UnknownToken(string(next), l.Line, l.Column)
l.retract()
return t
}
// consume second token
l.move()
return t
}
// consumeArithmeticOrBitOperator consumes an arithmetic or bit operator token
func (l *Lexer) consumeArithmeticOrBitOperator() Token {
op := l.getCurr()
t := Token{
Column: l.Column,
Line: l.Line,
Value: string(op),
}
l.move()
next, _ := l.peek()
if next == '=' {
switch op {
case '+':
t.Type = PlusEq
case '-':
t.Type = MinusEq
case '/':
t.Type = DivEq
case '*':
t.Type = TimesEq
case '%':
t.Type = ModEq
case '&':
t.Type = BitAndEq
case '|':
t.Type = BitOrEq
case '^':
t.Type = BitXorEq
default:
l.retract()
return l.getUnknownToken(string(op))
}
// consume equals sign
t.Value = string(op) + "="
l.move()
return t
}
if !isBoolOperator(next) {
switch op {
case '+':
t.Type = Plus
// check if increment and consume
if next == '+' {
t.Type = Increment
t.Value = "++"
l.move()
}
case '-':
t.Type = Minus
// check if decrement and consume
if next == '-' {
t.Type = Decrement
t.Value = "--"
l.move()
}
case '/':
t.Type = Div
case '*':
t.Type = Times
case '%':
t.Type = Mod
case '&':
t.Type = BitAnd
case '|':
t.Type = BitOr
case '^':
t.Type = BitXor
case '~':
t.Type = BitNot
default:
l.retract()
return l.getUnknownToken(string(op))
}
return t
}
l.retract()
return l.getUnknownToken(string(next))
}
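// The split above sends '+' down three paths: bare Plus, PlusEq when followed
// by '=', and Increment when doubled. The table-driven check below illustrates
// that; NewLexer and Next are assumed, hypothetical entry points that are not
// shown in this file, so treat the harness as a sketch rather than a real test.
func TestPlusFamilySketch(t *testing.T) {
	cases := map[string]TokenType{
		"+":  Plus,
		"+=": PlusEq,
		"++": Increment,
	}
	for src, want := range cases {
		l := NewLexer(src) // assumed constructor
		if got := l.Next().Type; got != want { // assumed entry point
			t.Errorf("%q: got %v, want %v", src, got, want)
		}
	}
}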
// consumableBoolOperator consumes a bool operator token
func (l *Lexer) consumableBoolOperator() Token {
t := Token{
Column: l.Column,
Line: l.Line,
}
c := l.getCurr()
l.move()
next, _ := l.peek()
if c != '!' && c != next {
return l.getUnknownToken(string(next))
}
switch c {
case '&':
t.Type = And
t.Value = string(And)
case '|':
t.Type = Or
t.Value = string(Or)
case '!':
if next == '=' {
t.Type = NotEqual
t.Value = string(NotEqual)
} else {
t.Type = Not
t.Value = string(Not)
}
}
if t.Value != `!` {
l.move()
}
return t
}
// consumeComparisonOperator consumes an operator token
func (l *Lexer) consumeComparisonOperator() Token | switch char {
case '<':
if hasEquals {
t.Type = LessThanOrEqual
t.Value = "<="
} else {
t.Type = LessThan
t.Value = "<"
}
case '>':
if hasEquals {
t.Type = GreaterThanOrEqual
t.Value = ">="
} else {
t.Type = GreaterThan
t.Value = ">"
}
case '=':
if hasEquals {
t.Type = Equal
t.Value = "=="
} else {
t.Type = Assign
t.Value = "="
}
}
l.move()
return t
}
func (l *Lexer) recognizeLiteral() Token {
b := l.getCurr()
if beginsIdentifier(b) {
return l.consumeIdentifierOrKeyword()
}
if beginsNumber(b) {
if t := l.consumeNumber(); t.Type != Unknown {
return t
}
// if it began with a number literal, it is likely a dot
return l.consumeDots()
}
if beginsString(b) {
return l.consumeString()
}
if beginsRune(b) {
return l.consumeRune()
}
return UnknownToken(string(b), l.Line, l.Column)
}
// consumeIdentifierOrKeyword recognizes an identifier or a keyword
func (l *Lexer) consumeIdentifierOrKeyword() Token {
word := l.getNextWord(isValidIdentifierChar)
defer func() {
l.position += len(word)
l.Column += len(word)
}()
if t := l.consumableKeyword(word); t.Type != Unknown {
return t
}
Type := Identifier
if word == `_` {
Type = Underscore
}
col := l.Column
// check for colon after identifier
if next := len(word) + l.position; next < len(l.input) && l.input[next] == ':' {
Type = LoopName
l.move()
}
return Token{
Type: Type,
Value: word,
Column: col,
Line: l.Line,
}
}
// consumableKeyword returns a keyword/unknown token which can be consumed
// this also consumes true/false literals
func (l *Lexer) consumableKeyword(word string) Token {
t := Token{
Value: word,
Column: l.Column,
Line: l.Line,
}
keyword := TokenType(word)
if keyword == `true` || keyword == `false` {
t.Type = Bool
} else if _, ok := keywords[keyword]; ok {
t.Type = keyword
} else {
t.Type = Unknown
}
return t
}
// consumeDots consumes a dot or dots token
func (l *Lexer) consumeDots() Token {
t := Token{
Type: Dot,
Value: string(Dot),
Line: l.Line,
Column: l.Column,
}
l.move()
// check for potential second dot to form two dots
if next, _ := l.peek(); isDot(next) {
t.Type = TwoDots
t.Value = string(TwoDots)
l.move()
// check for potential third dot to form ellipsis
if next, _ = l.peek(); isDot(next) {
t.Type = Ellipsis
t.Value = string(Ellipsis)
l.move()
}
}
return t
}
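// consumeDots folds runs of '.' into Dot, TwoDots, or Ellipsis as shown above.
// A sketch of the expected mapping; NewLexer and Next are assumed, hypothetical
// entry points, and fmt would need importing for this to run.
func dotsSketch() {
	for src, want := range map[string]TokenType{
		".":   Dot,
		"..":  TwoDots,
		"...": Ellipsis,
	} {
		l := NewLexer(src) // assumed constructor
		if t := l.Next(); t.Type != want { // assumed entry point
			fmt.Printf("%q: got %v, want %v\n", src, t.Type, want)
		}
	}
}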
// consumeRune consumes a rune token
func (l *Lexer) consumeRune() Token {
// consume_quote returns an empty Token and true if a quote
// can be consumed; otherwise it returns an unknown token and false
consume_quote := func() (Token, bool) {
if b, ok := l.peek(); !ok || b != '\'' {
col := l.Column
l.move()
if !ok {
return UnknownToken(``, l.Line, col), false
}
return UnknownToken(string(b), l.Line, col), false
}
return Token{}, true
}
if t, ok := consume_quote(); !ok {
return | {
t := Token{
Column: l.Column,
Line: l.Line,
}
char := l.getCurr()
hasEquals := false
if l.position+1 < len(l.input) {
// copy next rune
cpy := l.input[l.position+1]
// move cursor to accommodate '='
if cpy == '=' {
hasEquals = true
l.move()
}
}
| identifier_body |
lib.rs | b fn flush(&mut self) -> Result<(), Error> {
if self.bit_count > 0 {
self.buffer <<= 8 - self.bit_count;
let mut buffer = 0;
for i in 0..8 {
buffer <<= 1;
buffer |= (self.buffer >> i) & 1;
}
self.output_vector.push(buffer.clone());
if PRINT_DEBUG == true {
println!("push data: {:08b}", self.buffer);
for i in 0..(self.output_vector.len()){
print!("{:08b}", self.output_vector[i]);
}
println!();
println!("{:02x?}", self.output_vector);
}
}
Ok(())
}
/*
Runs when the buffer has accumulated 8 bits (1 byte).
*/
fn flush_to_output(&mut self) -> Result<(), Error> {
let mut buffer = 0;
for i in 0..8 {
buffer <<= 1;
buffer |= (self.buffer >> i) & 1;
}
self.output_vector.push(buffer.clone());
if PRINT_DEBUG == true {
println!("push data: {:08b}", buffer);
for i in 0..(self.output_vector.len()){
print!("{:08b}", self.output_vector[i]);
}
println!();
}
self.buffer = 0;
self.bit_count = 0;
Ok(())
}
}
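/*
Both flush paths above emit each byte LSB-first by reversing its bit order
before pushing. The standalone sketch below mirrors that inner loop so the
reversal is easy to verify in isolation; it is illustrative and not called
by the code above.
*/
#[test]
fn bit_reversal_sketch() {
    fn reverse_bits(byte: u8) -> u8 {
        let mut out = 0u8;
        for i in 0..8 {
            out <<= 1;
            out |= (byte >> i) & 1; // read LSB-first, emit MSB-first
        }
        out
    }
    assert_eq!(reverse_bits(0b0000_0001), 0b1000_0000);
    assert_eq!(reverse_bits(0b1011_0000), 0b0000_1101);
}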
/*
Holds the input as raw bytes.
buffer: array that takes in data, MAX_BUFFER_SIZE bytes at a time.
buf_count: how far into buffer we have read so far.
buf_size: how much of buffer actually holds data.
flag: used to signal that there is no more data to read.
file_size: records the size of the input file.
input: the input source itself.
*/
struct ByteReader<'a, T: Read> {
buffer: [u8; MAX_BUFFER_SIZE],
buf_count: usize,
buf_size: usize,
flag: bool,
file_size: u32,
input: &'a mut T,
}
impl<'a, T: Read> ByteReader<'a, T> {
pub fn new(input: &'a mut T) -> Self {
let mut reader = ByteReader {
buffer: [0; MAX_BUFFER_SIZE],
buf_count: 0,
buf_size: 0,
flag: true,
file_size: 0,
input,
};
let _ = reader.load_next_byte();
reader
}
/*
Runs on the first read, and whenever buffer has been read to the end.
*/
fn load_next_byte(&mut self) -> Result<(), std::io::Error>{
match self.input.read(&mut self.buffer)? {
0 => {
self.flag = false;
self.buf_size = 0;
},
n => {
self.file_size += n as u32;
self.buf_size = n;
self.flag = true;
}
};
Ok(())
}
/*
Returns the byte at position buf_count.
*/
pub fn seek_byte(&mut self) -> u8{
self.buffer[self.buf_count]
}
/*
Advances buf_count. If we have reached the end of buffer,
load_next_byte reads in the next block.
*/
pub fn next_byte(&mut self) {
if self.buf_count + 1 < self.buf_size {
self.buf_count += 1;
} else {
let _ = self.load_next_byte();
self.buf_count = 0;
}
}
/*
Returns the byte at buf_count, then advances via next_byte.
*/
pub fn get_byte(&mut self) -> u8 {
let buffer = self.buffer[self.buf_count];
self.next_byte();
buffer
}
}
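/*
A minimal usage sketch of ByteReader, assuming it is in scope;
std::io::Cursor stands in for a file handle here.
*/
#[test]
fn byte_reader_sketch() {
    use std::io::Cursor;
    let mut input = Cursor::new(vec![0xAB, 0xCD, 0xEF]);
    let mut reader = ByteReader::new(&mut input);
    assert_eq!(reader.seek_byte(), 0xAB); // peek does not advance
    assert_eq!(reader.get_byte(), 0xAB); // get returns and advances
    assert_eq!(reader.get_byte(), 0xCD);
}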
/*
Struct for computing CRC-32.
The implementation follows the explanation at:
https://www.slideshare.net/7shi/crc32
divisor: the bit string used as the divisor.
non_divisor: the data being divided (the dividend).
buffer: temporary storage for incoming data.
buf_count: how many bits of buffer have been processed.
first_count: counts the first 4 bytes, which must be inverted.
*/
struct Crc32 {
divisor: u32,
non_divisor: u32,
buffer: u8,
buf_count: u8,
first_count: u8,
}
impl Crc32 {
pub fn new() -> Self {
Crc32{
divisor: 0b100110000010001110110110111,
non_divisor: 0,
buffer: 0,
buf_count: 0,
first_count: 0,
}
}
/*
Feeds data into non_divisor and buffer.
*/
pub fn push_buf(&mut self, buf: u8){
let mut buffer: u8 = 0;
for i in 0..8 {
buffer <<= 1;
buffer |= (buf >> i) & 1;
}
if self.first_count < 4 {
self.non_divisor <<= 8;
self.non_divisor += !buffer as u32;
self.first_count += 1;
} else {
self.buffer = buffer.clone();
self.buf_count = 8;
self.bit_shift();
}
}
/*
If the top bit is set, performs the division; otherwise shifts
buffer bits in from the front.
*/
fn bit_shift(&mut self){
for i in 0..self.buf_count{
if self.non_divisor >= 2147483648{
self.non_divisor <<= 1;
self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
self.xor();
} else {
self.non_divisor <<= 1;
self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
}
}
self.buf_count = 0
}
/*
Performs the division; in practice this is an XOR.
*/
fn xor(&mut self){
let buffer = self.non_divisor ^ self.divisor;
self.non_divisor = buffer;
}
/*
Computes the CRC-32 from the current non_divisor and returns it.
*/
fn get_crc32(&mut self) -> u32 {
self.push_buf(0);
self.push_buf(0);
self.push_buf(0);
self.push_buf(0);
let mut buffer: u32 = 0;
for i in 0..32 {
buffer <<= 1;
buffer |= (self.non_divisor >> i) & 1;
}
if PRINT_DEBUG == true {
println!("crc32: {:08x?}", !buffer);
}
!buffer
}
}
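/*
A hedged check of the whole pipeline: 0xCBF43926 is the standard CRC-32
check value for "123456789". The expected constant is an assumption about
the polynomial and reflection used above; if this bit-by-bit variant ever
disagrees, compare it against a reference implementation such as the
crc32fast crate.
*/
#[test]
fn crc32_check_value_sketch() {
    let mut crc = Crc32::new();
    for &b in b"123456789" {
        crc.push_buf(b);
    }
    assert_eq!(crc.get_crc32(), 0xCBF43926);
}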
/*
Struct holding the zip local header, central directory header,
end-of-central-directory header, and so on.
buffer: holds the header bytes.
before_size: size before compression.
after_size: size after compression.
filename: the file name.
crc32: the CRC-32 value.
hms: hour/minute/second data.
ymd: year/month/day data.
*/
struct Header{
buffer: Vec<u8>,
before_size: u32,
after_size: u32,
filename: String,
crc32: u32,
hms: u16,
ymd: u16,
}
impl Header {
pub fn new(before_size: u32, after_size: u32, filename: impl Into<String>, crc32: u32, hms: u16, ymd: u16) -> Self {
Header{
buffer: Vec::new(),
before_size,
after_size,
filename: filename.into(),
crc32,
hms,
ymd,
}
}
/*
Appends a 32-bit value to buffer, least significant byte first.
*/
fn push32(&mut self, num: u32) {
let a = num & 0b11111111;
let b = (num >> 8) & (0b11111111);
let c = (num >> 16) & (0b11111111);
let d = (num >> 24) & (0b11111111);
| */
pu | identifier_name |
|
lib.rs | を保持する
ymd: year/month/day data.
*/
struct Header{
buffer: Vec<u8>,
before_size: u32,
after_size: u32,
filename: String,
crc32: u32,
hms: u16,
ymd: u16,
}
impl Header {
pub fn new(before_size: u32, after_size: u32, filename: impl Into<String>, crc32: u32, hms: u16, ymd: u16) -> Self {
Header{
buffer: Vec::new(),
before_size,
after_size,
filename: filename.into(),
crc32,
hms,
ymd,
}
}
/*
Appends a 32-bit value to buffer, least significant byte first.
*/
fn push32(&mut self, num: u32) {
let a = num & 0b11111111;
let b = (num >> 8) & (0b11111111);
let c = (num >> 16) & (0b11111111);
let d = (num >> 24) & (0b11111111);
self.buffer.push(a as u8);
self.buffer.push(b as u8);
self.buffer.push(c as u8);
self.buffer.push(d as u8);
}
/*
Appends a 16-bit value to buffer, least significant byte first.
*/
fn push16(&mut self, num: u16) {
let a = num & 0b11111111;
let b = (num >> 8) & (0b11111111);
self.buffer.push(a as u8);
self.buffer.push(b as u8);
}
/*
Appends the signature marking a PK0506 (end of central directory) header.
*/
fn push_pk0506(&mut self){
self.buffer.push(0x50);
self.buffer.push(0x4b);
self.buffer.push(0x05);
self.buffer.push(0x06);
}
/*
Appends the signature marking a PK0304 (local file) header.
*/
fn push_pk0304(&mut self){
self.buffer.push(0x50);
self.buffer.push(0x4b);
self.buffer.push(0x03);
self.buffer.push(0x04);
}
/*
Appends the signature marking a PK0102 (central directory) header.
*/
fn push_pk0102(&mut self){
self.buffer.push(0x50);
self.buffer.push(0x4b);
self.buffer.push(0x01);
self.buffer.push(0x02);
}
/*
Appends the file name bytes.
*/
fn push_filename(&mut self){
let bytes: &[u8] = self.filename.as_bytes();
for i in 0..bytes.len() {
self.buffer.push(bytes[i]);
}
}
/*
Builds the local file header from the given information.
Layout:
4 bytes: PK0304 signature
2 bytes: version needed to extract (2.0)
2 bytes: general purpose flags (not set here)
2 bytes: compression method (deflate: 0008)
2 bytes: modification time
2 bytes: modification date
4 bytes: CRC-32
4 bytes: compressed size (call it m)
4 bytes: uncompressed size
2 bytes: file name length (call it n)
2 bytes: extra field length (none here)
n bytes: file name
m bytes: compressed data (not pushed here; appended when writing the file)
*/
pub fn local_header(mut self) -> Vec<u8> {
self.push_pk0304();
self.push16(0x0014);
self.push16(0x0000);
self.push16(0x0008);
self.push16(self.hms);
self.push16(self.ymd);
self.push32(self.crc32);
self.push32(self.after_size);
self.push32(self.before_size);
self.push16((self.filename.len()) as u16);
self.push16(0x0000);
self.push_filename();
self.buffer
}
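/*
Illustrative cross-check on the layout comment above: the fixed part of a
PK0304 local header is 30 bytes (4+2+2+2+2+2+4+4+4+2+2), so the built buffer
should be 30 bytes plus the file name. This helper is not part of the
original code and is unused elsewhere.
*/
#[allow(dead_code)]
fn local_header_len(&self) -> usize {
    30 + self.filename.len()
}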
/*
Builds the central directory header from the given information.
4 bytes: PK0102 signature
2 bytes: version made by (2.0 here)
2 bytes: version needed to extract (2.0)
2 bytes: general purpose flags (not set here)
2 bytes: compression method (deflate)
2 bytes: modification time
2 bytes: modification date
4 bytes: CRC-32
4 bytes: compressed size
4 bytes: uncompressed size
2 bytes: file name length (call it n)
2 bytes: extra field length (0, unused)
2 bytes: comment length (none here)
2 bytes: disk number holding the matching PK0304 header
(0, since the archive is not split)
2 bytes: internal attributes of the file stored in the matching PK0304 (0 here)
4 bytes: external (OS) attributes of the target file (0 here)
4 bytes: offset of the matching PK0304 header
(0 here, since only a single file is stored)
n bytes: file name
*/
pub fn central_header(mut self) -> Vec<u8> {
self.push_pk0102();
self.push16(0x0314);
self.push16(0x0014);
self.push16(0x0000);
self.push16(0x0008);
self.push16(self.hms);
self.push16(self.ymd);
self.push32(self.crc32);
self.push32(self.after_size);
self.push32(self.before_size);
self.push16((self.filename.len()) as u16);
self.push16(0x0000);
self.push16(0x0000);
self.push16(0x0000);
self.push16(0x0000);
self.push32(0x00000000);
self.push32(0x00000000);
self.push_filename();
self.buffer
}
/*
Builds the end-of-central-directory header from the given information.
4 bytes: PK0506 signature
2 bytes: number of this disk (0, not split)
2 bytes: disk holding the first PK0304 header (0, not split)
2 bytes: number of entries on this disk (same as the next field, not split)
2 bytes: total number of compressed files (1 here)
4 bytes: total size of the PK0102 headers
4 bytes: offset where the PK0102 headers start
2 bytes: comment length (none here)
*/
pub fn end_header(mut self, header_size: u32, header_start: u32) -> Vec | <u8>{
self.push_pk0506();
self.push16(0x0000);
self.push16(0x0000);
self.push16(0x0001);
self.push16(0x0001);
self.push32(header_size);
self.push32(header_start);
self.push16(0x00);
self.buffer
}
/*
Implements clone.
*/
pub fn clone(&self) -> Self {
Header::new(self.before_size, self.after_size, self.filename.clone(), sel | identifier_body |
|
lib.rs | pub fn seek_byte(&mut self) -> u8{
self.buffer[self.buf_count]
}
/*
Advances buf_count. If we have reached the end of buffer,
load_next_byte reads in the next block.
*/
pub fn next_byte(&mut self) {
if self.buf_count + 1 < self.buf_size {
self.buf_count += 1;
} else {
let _ = self.load_next_byte();
self.buf_count = 0;
}
}
/*
Returns the byte at buf_count, then advances via next_byte.
*/
pub fn get_byte(&mut self) -> u8 {
let buffer = self.buffer[self.buf_count];
self.next_byte();
buffer
}
}
/*
Struct for computing CRC-32.
The implementation follows the explanation at:
https://www.slideshare.net/7shi/crc32
divisor: the bit string used as the divisor.
non_divisor: the data being divided (the dividend).
buffer: temporary storage for incoming data.
buf_count: how many bits of buffer have been processed.
first_count: counts the first 4 bytes, which must be inverted.
*/
struct Crc32 {
divisor: u32,
non_divisor: u32,
buffer: u8,
buf_count: u8,
first_count: u8,
}
impl Crc32 {
pub fn new() -> Self {
Crc32{
divisor: 0b100110000010001110110110111,
non_divisor: 0,
buffer: 0,
buf_count: 0,
first_count: 0,
}
}
/*
Feeds data into non_divisor and buffer.
*/
pub fn push_buf(&mut self, buf: u8){
let mut buffer: u8 = 0;
for i in 0..8 {
buffer <<= 1;
buffer |= (buf >> i) & 1;
}
if self.first_count < 4 {
self.non_divisor <<= 8;
self.non_divisor += !buffer as u32;
self.first_count += 1;
} else {
self.buffer = buffer.clone();
self.buf_count = 8;
self.bit_shift();
}
}
/*
If the top bit is set, performs the division; otherwise shifts
buffer bits in from the front.
*/
fn bit_shift(&mut self){
for i in 0..self.buf_count{
if self.non_divisor >= 2147483648{
self.non_divisor <<= 1;
self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
self.xor();
} else {
self.non_divisor <<= 1;
self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
}
}
self.buf_count = 0
}
/*
Performs the division; in practice this is an XOR.
*/
fn xor(&mut self){
let buffer = self.non_divisor ^ self.divisor;
self.non_divisor = buffer;
}
/*
Computes the CRC-32 from the current non_divisor and returns it.
*/
fn get_crc32(&mut self) -> u32 {
self.push_buf(0);
self.push_buf(0);
self.push_buf(0);
self.push_buf(0);
let mut buffer: u32 = 0;
for i in 0..32 {
buffer <<= 1;
buffer |= (self.non_divisor >> i) & 1;
}
if PRINT_DEBUG == true {
println!("crc32: {:08x?}", !buffer);
}
!buffer
}
}
/*
Struct holding the zip local header, central directory header,
end-of-central-directory header, and so on.
buffer: holds the header bytes.
before_size: size before compression.
after_size: size after compression.
filename: the file name.
crc32: the CRC-32 value.
hms: hour/minute/second data.
ymd: year/month/day data.
*/
struct Header{
buffer: Vec<u8>,
before_size: u32,
after_size: u32,
filename: String,
crc32: u32,
hms: u16,
ymd: u16,
}
impl Header {
pub fn new(before_size: u32, after_size: u32, filename: impl Into<String>, crc32: u32, hms: u16, ymd: u16) -> Self {
Header{
buffer: Vec::new(),
before_size,
after_size,
filename: filename.into(),
crc32,
hms,
ymd,
}
}
/*
Appends a 32-bit value to buffer, least significant byte first.
*/
fn push32(&mut self, num: u32) {
let a = num & 0b11111111;
let b = (num >> 8) & (0b11111111);
let c = (num >> 16) & (0b11111111);
let d = (num >> 24) & (0b11111111);
self.buffer.push(a as u8);
self.buffer.push(b as u8);
self.buffer.push(c as u8);
self.buffer.push(d as u8);
}
/*
Appends a 16-bit value to buffer, least significant byte first.
*/
fn push16(&mut self, num: u16) {
let a = num & 0b11111111;
let b = (num >> 8) & (0b11111111);
self.buffer.push(a as u8);
self.buffer.push(b as u8);
}
/*
Appends the signature marking a PK0506 (end of central directory) header.
*/
fn push_pk0506(&mut self){
self.buffer.push(0x50);
self.buffer.push(0x4b);
self.buffer.push(0x05);
self.buffer.push(0x06);
}
/*
Appends the signature marking a PK0304 (local file) header.
*/
fn push_pk0304(&mut self){
self.buffer.push(0x50);
self.buffer.push(0x4b);
self.buffer.push(0x03);
self.buffer.push(0x04);
}
/*
Appends the signature marking a PK0102 (central directory) header.
*/
fn push_pk0102(&mut self){
self.buffer.push(0x50);
self.buffer.push(0x4b);
self.buffer.push(0x01);
self.buffer.push(0x02);
}
/*
Appends the file name bytes.
*/
fn push_filename(&mut self){
let bytes: &[u8] = self.filename.as_bytes();
for i in 0..bytes.len() {
self.buffer.push(bytes[i]);
}
}
/*
Builds the local file header from the given information.
Layout:
4 bytes: PK0304 signature
2 bytes: version needed to extract (2.0)
2 bytes: general purpose flags (not set here)
2 bytes: compression method (deflate: 0008)
2 bytes: modification time
2 bytes: modification date
4 bytes: CRC-32
4 bytes: compressed size (call it m)
4 bytes: uncompressed size
2 bytes: file name length (call it n)
2 bytes: extra field length (none here)
n bytes: file name
m bytes: compressed data (not pushed here; appended when writing the file)
*/
pub fn local_header(mut self) -> Vec<u8> {
self.push_pk0304();
self.push16(0x0014);
self.push16(0x0000 | */ | random_line_split |
|
font.go | octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, (2 * segCount) - searchRange.
// range_shift, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// log.Printf("seg_count %v", seg_count)
f.cmap_entry_array = make([]cmap_entry_t, seg_count)
// uint16 * seg_count, Ending character code for each segment,
// last = 0xFFFF.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].end_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("end_code %v", f.cmap_entry_array[i].end_code)
}
// uint16, This value should be zero.
// reserved_pad, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16 * seg_count, Starting character code for each segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].start_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("start_code %v", f.cmap_entry_array[i].start_code)
}
// uint16 * seg_count, Delta for all character codes in segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_delta, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_delta %v", f.cmap_entry_array[i].id_delta)
}
// uint16 * seg_count, Offset in bytes to glyph indexArray, or 0.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_range_offset, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_range_offset %v", f.cmap_entry_array[i].id_range_offset)
}
// uint16 * seg_count, Glyph index array.
f.cmap_index_array = f.cmap[offset:]
return nil
} else if cmap_format_version == 12 {
// Format 12.0 is a bit like format 4, in that it defines segments for
// sparse representation in 4-byte character space.
// So, the next two bytes is part of version segment and should be 0.
expect_zero, offset := octets_to_u16(f.cmap, offset), offset+2
if expect_zero != 0 {
msg := fmt.Sprintf("UNSUPPORT or INVALID: cmap format version %x",
f.cmap[offset-4:offset])
return errors.New(msg)
}
// uint32, Byte length of this subtable (including the header).
// length, offset := octets_to_u32(font.cmap, offset), offset+4
offset = offset + 4
// uint32, 0 if don't care.
// lang, offset := octets_to_u32(font.cmap, offset), offset+4
offset = offset + 4
// uint32, Number of groupings which follow.
group_num, offset := octets_to_u32(f.cmap, offset), offset+4
// log.Printf("group_num %v", group_num)
// Here follow the individual groups.
// Allocate entries for this subtable; the format-4 branch's make() never ran here.
f.cmap_entry_array = make([]cmap_entry_t, group_num)
for i := uint32(0); i < group_num; i++ {
// uint32, First character code in this group.
f.cmap_entry_array[i].start_code, offset =
octets_to_u32(f.cmap, offset), offset+4
// uint32, Last character code in this group.
f.cmap_entry_array[i].end_code, offset =
octets_to_u32(f.cmap, offset), offset+4
// uint32, Glyph index corresponding to the starting character code.
f.cmap_entry_array[i].id_delta, offset =
octets_to_u32(f.cmap, offset), offset+4
}
return nil
} else {
msg := fmt.Sprintf("UNSUPPORT: cmap format version %v",
cmap_format_version)
return errors.New(msg)
}
}
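// Once parse_cmap has filled the segment arrays, a format-4 glyph lookup walks
// them as sketched below. This helper is hypothetical (not part of the original
// file) and follows the TrueType idDelta/idRangeOffset rules, where
// idRangeOffset is relative to its own slot in the raw table.
func (f *Font) glyphIndexSketch(c uint32) uint16 {
	for i, seg := range f.cmap_entry_array {
		if c > seg.end_code {
			continue
		}
		if c < seg.start_code {
			return 0 // missing glyph
		}
		if seg.id_range_offset == 0 {
			return uint16(c + seg.id_delta) // wraps modulo 65536
		}
		// Offset into cmap_index_array, which starts right after the
		// id_range_offset array in the raw table.
		idx := int(seg.id_range_offset) + 2*(int(c)-int(seg.start_code)) -
			2*(len(f.cmap_entry_array)-i)
		g := octets_to_u16(f.cmap_index_array, idx)
		if g == 0 {
			return 0
		}
		return uint16(uint32(g) + seg.id_delta) // delta applied modulo 65536
	}
	return 0
}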
const (
kLocaOffsetFormatUnknown int = iota
kLocaOffsetFormatShort
kLocaOffsetFormatLong
)
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6head.html
func (f *Font) parse_head() error {
if len(f.head) != 54 {
msg := fmt.Sprintf("INVALID: bad head length %v", len(f.head))
return errors.New(msg)
}
// Range from 64 to 16384
f.units_per_em = int32(octets_to_u16(f.head, 18))
// log.Printf("units_per_em %d", f.units_per_em)
f.bounds.XMin = int32(int16(octets_to_u16(f.head, 36)))
f.bounds.YMin = int32(int16(octets_to_u16(f.head, 38)))
f.bounds.XMax = int32(int16(octets_to_u16(f.head, 40)))
f.bounds.YMax = int32(int16(octets_to_u16(f.head, 42)))
// 0 for short offsets, 1 for long offsets.
index_to_loc_format := octets_to_u16(f.head, 50)
// log.Printf("index_to_loc_format %d", index_to_loc_format)
if index_to_loc_format == 0 {
f.loca_offset_format = kLocaOffsetFormatShort
} else if index_to_loc_format == 1 {
f.loca_offset_format = kLocaOffsetFormatLong
} else {
msg := fmt.Sprintf("INVALID: bad head indexToLocFormat %v",
index_to_loc_format)
return errors.New(msg)
}
return nil
}
// http://developer.apple.com/fonts/TTRefMan/RM06/Chap6kern.html
func (font *Font) parse_kern() error {
if len(font.kern) <= 0 {
if font.kern_num != 0 {
return errors.New("INVALID: kern length.")
} else {
return nil
}
}
index := 0
// uint16, The version number of the kerning table (0x00010000 for the
// current version).
//
// Up to now, only the older version is supported. Windows supports only the
// older version. Mac supports both, but prefers the newer one.
//
// TODO(coding): Support the newer version.
kern_format_version, index := octets_to_u16(font.kern, index), index+2
if kern_format_version == 0 {
// uint16, The number of subtables included in the kerning table.
table_num, index := octets_to_u16(font.kern, index), index+2
if table_num != 1 {
msg := fmt.Sprintf("UNSUPPORT: kern table num %v", table_num)
return errors.New(msg)
}
index = index + 2
// uint16, The length of this subtable in bytes, including this header.
length, index := int(octets_to_u16(font.kern, index)), index+2
// uint16, Circumstances under which this table is used. See below for
// description.
coverage, index := octets_to_u16(font.kern, index), index+2
if coverage != 0x0001 {
// Up to now, only plain horizontal kerning (coverage 0x0001) is supported.
// TODO(coding): support the other coverage flags.
msg := fmt.Sprintf("UNSUPPORT: kern coverage: 0x%04x", coverage)
return errors.New(msg)
}
// uint16, number of kern.
font.kern_num, index = int(octets_to_u16(font.kern, index)), index+2
if font.kern_num*6 != length-14 {
msg := fmt.Sprintf("INVALID: Bad kern table length")
return errors.New(msg)
}
return nil
}
msg := fmt.Sprintf("UNSUPPORT: kern format version %v.",
kern_format_version)
return errors.New(msg)
}
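// The kern_num*6 == length-14 check above works because a format-0 subtable
// stores each kerning pair in 6 bytes after its 14-byte header. A hypothetical
// struct showing that layout, for illustration only:
type kernPairSketch struct {
	Left  uint16 // glyph index of the left glyph
	Right uint16 // glyph index of the right glyph
	Value int16  // kerning adjustment in font units (FUnits)
}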
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6maxp.html
func (font *Font) parse_maxp() error {
if len(font.maxp) != 32 | {
msg := fmt.Sprintf("INVALID: bad maxp length %v", len(font.maxp))
return errors.New(msg)
} | conditional_block |
|
font.go | 3 {
valid = true
break
}
// Microsoft UCS-2 Encoding or Microsoft UCS-4 Encoding.
if pid_psid == 0x00030001 || pid_psid == 0x0003000a {
valid = true
// Don't break. So that unicode encoding can override ms encoding.
}
// TODO(coding): support whole list about pid and psid.
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6name.html#ID
}
if !valid {
return errors.New("UNSUPPORT or INVALID: cmap language encoding.")
}
cmap_format_version, offset := octets_to_u16(f.cmap, offset), offset+2
if cmap_format_version == 4 { // cmap format 2
// uint16, Length of subtable in bytes.
// length, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, Language code for this encoding subtable, or zero if
// language-independent.
lang, offset := octets_to_u16(f.cmap, offset), offset+2
if lang != 0 {
return errors.New("UNSUPPORT: cmap language isn't independent.")
}
// uint16, 2 * segCount.
seg_count_x_2, offset := int(octets_to_u16(f.cmap, offset)), offset+2
seg_count := seg_count_x_2 / 2
// uint16, 2 * (2**FLOOR(log2(segCount))).
// search_range, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, log2(searchRange/2)
// entry_selector, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, (2 * segCount) - searchRange.
// range_shift, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// log.Printf("seg_count %v", seg_count)
f.cmap_entry_array = make([]cmap_entry_t, seg_count)
// uint16 * seg_count, Ending character code for each segment,
// last = 0xFFFF.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].end_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("end_code %v", f.cmap_entry_array[i].end_code)
}
// uint16, This value should be zero.
// reserved_pad, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16 * seg_count, Starting character code for each segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].start_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("start_code %v", f.cmap_entry_array[i].start_code)
}
// uint16 * seg_count, Delta for all character codes in segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_delta, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_delta %v", f.cmap_entry_array[i].id_delta)
}
// uint16 * seg_count, Offset in bytes to glyph indexArray, or 0.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_range_offset, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_range_offset %v", f.cmap_entry_array[i].id_range_offset)
}
// uint16 * seg_count, Glyph index array.
f.cmap_index_array = f.cmap[offset:]
return nil
} else if cmap_format_version == 12 {
// Format 12.0 is a bit like format 4, in that it defines segments for
// sparse representation in 4-byte character space.
// So, the next two bytes is part of version segment and should be 0.
expect_zero, offset := octets_to_u16(f.cmap, offset), offset+2
if expect_zero != 0 {
msg := fmt.Sprintf("UNSUPPORT or INVALID: cmap format version %x",
f.cmap[offset-4:offset])
return errors.New(msg)
}
// uint32, Byte length of this subtable (including the header).
// length, offset := octets_to_u32(font.cmap, offset), offset+4
offset = offset + 4
// uint32, 0 if don't care.
// lang, offset := octets_to_u32(font.cmap, offset), offset+4
offset = offset + 4
// uint32, Number of groupings which follow.
group_num, offset := octets_to_u32(f.cmap, offset), offset+4
// log.Printf("group_num %v", group_num)
// Here follow the individual groups.
// Allocate entries for this subtable; the format-4 branch's make() never ran here.
f.cmap_entry_array = make([]cmap_entry_t, group_num)
for i := uint32(0); i < group_num; i++ {
// uint32, First character code in this group.
f.cmap_entry_array[i].start_code, offset =
octets_to_u32(f.cmap, offset), offset+4
// uint32, Last character code in this group.
f.cmap_entry_array[i].end_code, offset =
octets_to_u32(f.cmap, offset), offset+4
// uint32, Glyph index corresponding to the starting character code.
f.cmap_entry_array[i].id_delta, offset =
octets_to_u32(f.cmap, offset), offset+4
}
return nil
} else {
msg := fmt.Sprintf("UNSUPPORT: cmap format version %v",
cmap_format_version)
return errors.New(msg)
}
}
const (
kLocaOffsetFormatUnknown int = iota
kLocaOffsetFormatShort
kLocaOffsetFormatLong
)
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6head.html
func (f *Font) parse_head() error {
if len(f.head) != 54 {
msg := fmt.Sprintf("INVALID: bad head length %v", len(f.head))
return errors.New(msg)
}
// Range from 64 to 16384
f.units_per_em = int32(octets_to_u16(f.head, 18))
// log.Printf("units_per_em %d", f.units_per_em)
f.bounds.XMin = int32(int16(octets_to_u16(f.head, 36)))
f.bounds.YMin = int32(int16(octets_to_u16(f.head, 38)))
f.bounds.XMax = int32(int16(octets_to_u16(f.head, 40)))
f.bounds.YMax = int32(int16(octets_to_u16(f.head, 42)))
// 0 for short offsets, 1 for long offsets.
index_to_loc_format := octets_to_u16(f.head, 50)
// log.Printf("index_to_loc_format %d", index_to_loc_format)
if index_to_loc_format == 0 {
f.loca_offset_format = kLocaOffsetFormatShort
} else if index_to_loc_format == 1 {
f.loca_offset_format = kLocaOffsetFormatLong
} else {
msg := fmt.Sprintf("INVALID: bad head indexToLocFormat %v",
index_to_loc_format)
return errors.New(msg)
}
return nil
}
// http://developer.apple.com/fonts/TTRefMan/RM06/Chap6kern.html
func (font *Font) parse_kern() error | {
if len(font.kern) <= 0 {
if font.kern_num != 0 {
return errors.New("INVALID: kern length.")
} else {
return nil
}
}
index := 0
// uint16, The version number of the kerning table (0x00010000 for the
// current version).
//
// Up to now, only the older version is supported. Windows supports only the
// older version. Mac supports both, but prefers the newer one.
//
// TODO(coding): Support the newer version.
kern_format_version, index := octets_to_u16(font.kern, index), index+2
| identifier_body |
|
font.go | begin := int(octets_to_u32(ttf_bytes, table_offset+8))
length := int(octets_to_u32(ttf_bytes, table_offset+12))
switch title {
case "cmap":
new_font.cmap, err = read_table(ttf_bytes, begin, length)
case "head":
new_font.head, err = read_table(ttf_bytes, begin, length)
case "kern":
new_font.kern, err = read_table(ttf_bytes, begin, length)
case "maxp":
new_font.maxp, err = read_table(ttf_bytes, begin, length)
case "cvt ":
new_font.cvt, err = read_table(ttf_bytes, begin, length)
case "fpgm":
new_font.fpgm, err = read_table(ttf_bytes, begin, length)
case "glyf":
new_font.glyf, err = read_table(ttf_bytes, begin, length)
case "hmtx":
new_font.hmtx, err = read_table(ttf_bytes, begin, length)
case "loca":
new_font.loca, err = read_table(ttf_bytes, begin, length)
case "prep":
new_font.prep, err = read_table(ttf_bytes, begin, length)
case "hhea":
new_font.hhea, err = read_table(ttf_bytes, begin, length)
}
if err != nil {
return
}
}
if err = new_font.parse_head(); err != nil {
return
}
if err = new_font.parse_cmap(); err != nil {
return
}
if err = new_font.parse_maxp(); err != nil {
return
}
if err = new_font.parse_hhea(); err != nil {
return
}
if err = new_font.parse_kern(); err != nil {
return
}
return new_font, nil
}
func read_table(ttf_bytes []byte, begin int, length int) ([]byte, error) {
if begin < 0 {
return nil, errors.New("INVALID: begin too large.")
}
if length < 0 {
return nil, errors.New("INVALID: length too large.")
}
end := begin + length
if end < 0 || end > len(ttf_bytes) {
return nil, errors.New("INVALID: begin + length too large.")
}
return ttf_bytes[begin:end], nil
}
type cmap_entry_t struct {
start_code uint32
end_code uint32
id_delta uint32
id_range_offset uint32
}
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6cmap.html
func (f *Font) parse_cmap() error {
if len(f.cmap) < 4 {
log.Print("Font cmap too short.")
}
index := 2
subtable_num, index := int(octets_to_u16(f.cmap, index)), index+2
if len(f.cmap) < subtable_num*8+4 {
log.Print("Font cmap too short.")
}
valid := false
offset := 0
for i := 0; i < subtable_num; i++ {
// platform id is platform identifier, platform specific id is platform
// specific encoding identifier.
var pid_psid uint32
pid_psid, index = octets_to_u32(f.cmap, index), index+4
offset, index = int(octets_to_u32(f.cmap, index)), index+4
// Unicode encoding.
if pid_psid == 0x00000003 {
valid = true
break
}
// Microsoft UCS-2 Encoding or Microsoft UCS-4 Encoding.
if pid_psid == 0x00030001 || pid_psid == 0x0003000a {
valid = true
// Don't break. So that unicode encoding can override ms encoding.
}
// TODO(coding): support whole list about pid and psid.
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6name.html#ID
}
if !valid {
return errors.New("UNSUPPORT or INVALID: cmap language encoding.")
}
cmap_format_version, offset := octets_to_u16(f.cmap, offset), offset+2
if cmap_format_version == 4 { // cmap format 2
// uint16, Length of subtable in bytes.
// length, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, Language code for this encoding subtable, or zero if
// language-independent.
lang, offset := octets_to_u16(f.cmap, offset), offset+2
if lang != 0 {
return errors.New("UNSUPPORT: cmap language isn't independent.")
}
// uint16, 2 * segCount.
seg_count_x_2, offset := int(octets_to_u16(f.cmap, offset)), offset+2
seg_count := seg_count_x_2 / 2
// uint16, 2 * (2**FLOOR(log2(segCount))).
// search_range, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, log2(searchRange/2)
// entry_selector, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, (2 * segCount) - searchRange.
// range_shift, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// log.Printf("seg_count %v", seg_count)
f.cmap_entry_array = make([]cmap_entry_t, seg_count)
// uint16 * seg_count, Ending character code for each segment,
// last = 0xFFFF.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].end_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("end_code %v", f.cmap_entry_array[i].end_code)
}
// uint16, This value should be zero.
// reserved_pad, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16 * seg_count, Starting character code for each segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].start_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("start_code %v", f.cmap_entry_array[i].start_code)
}
// uint16 * seg_count, Delta for all character codes in segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_delta, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_delta %v", f.cmap_entry_array[i].id_delta)
}
// uint16 * seg_count, Offset in bytes to glyph indexArray, or 0.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_range_offset, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_range_offset %v", f.cmap_entry_array[i].id_range_offset)
}
// uint16 * seg_count, Glyph index array.
f.cmap_index_array = f.cmap[offset:]
return nil
} else if cmap_format_version == 12 {
// Format 12.0 is a bit like format 4, in that it defines segments for
// sparse representation in 4-byte character space.
// So, the next two bytes is part of version segment and should be 0.
expect_zero, offset := octets_to_u16(f.cmap, offset), offset+2
if expect_zero != 0 {
msg := fmt.Sprintf("UNSUPPORT or INVALID: cmap format version %x",
f.cmap[offset-4:offset])
return errors.New(msg)
}
// uint32, Byte length of this subtable (including the header).
// length, offset := octets_to_u32(font.cmap, offset), offset+4
offset = offset + 4
// uint32, 0 if don't care.
// lang, offset := octets | new_font := new(Font)
for i := 0; i < table_num; i++ {
table_offset := 16*i + 12
title := string(ttf_bytes[table_offset : table_offset+4]) | random_line_split |
|
font.go | , err = read_table(ttf_bytes, begin, length)
case "prep":
new_font.prep, err = read_table(ttf_bytes, begin, length)
case "hhea":
new_font.hhea, err = read_table(ttf_bytes, begin, length)
}
if err != nil {
return
}
}
if err = new_font.parse_head(); err != nil {
return
}
if err = new_font.parse_cmap(); err != nil {
return
}
if err = new_font.parse_maxp(); err != nil {
return
}
if err = new_font.parse_hhea(); err != nil {
return
}
if err = new_font.parse_kern(); err != nil {
return
}
return new_font, nil
}
func read_table(ttf_bytes []byte, begin int, length int) ([]byte, error) {
if begin < 0 {
return nil, errors.New("INVALID: begin too large.")
}
if length < 0 {
return nil, errors.New("INVALID: length too large.")
}
end := begin + length
if end < 0 || end > len(ttf_bytes) {
return nil, errors.New("INVALID: begin + length too large.")
}
return ttf_bytes[begin:end], nil
}
type cmap_entry_t struct {
start_code uint32
end_code uint32
id_delta uint32
id_range_offset uint32
}
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6cmap.html
func (f *Font) | () error {
if len(f.cmap) < 4 {
log.Print("Font cmap too short.")
}
index := 2
subtable_num, index := int(octets_to_u16(f.cmap, index)), index+2
if len(f.cmap) < subtable_num*8+4 {
log.Print("Font cmap too short.")
}
valid := false
offset := 0
for i := 0; i < subtable_num; i++ {
// platform id is platform identifier, platform specific id is platform
// specific encoding identifier.
var pid_psid uint32
pid_psid, index = octets_to_u32(f.cmap, index), index+4
offset, index = int(octets_to_u32(f.cmap, index)), index+4
// Unicode encoding.
if pid_psid == 0x00000003 {
valid = true
break
}
// Microsoft UCS-2 Encoding or Microsoft UCS-4 Encoding.
if pid_psid == 0x00030001 || pid_psid == 0x0003000a {
valid = true
// Don't break. So that unicode encoding can override ms encoding.
}
// TODO(coding): support whole list about pid and psid.
// https://developer.apple.com/fonts/TTRefMan/RM06/Chap6name.html#ID
}
if !valid {
return errors.New("UNSUPPORT or INVALID: cmap language encoding.")
}
cmap_format_version, offset := octets_to_u16(f.cmap, offset), offset+2
if cmap_format_version == 4 { // cmap format 2
// uint16, Length of subtable in bytes.
// length, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, Language code for this encoding subtable, or zero if
// language-independent.
lang, offset := octets_to_u16(f.cmap, offset), offset+2
if lang != 0 {
return errors.New("UNSUPPORT: cmap language isn't independent.")
}
// uint16, 2 * segCount.
seg_count_x_2, offset := int(octets_to_u16(f.cmap, offset)), offset+2
seg_count := seg_count_x_2 / 2
// uint16, 2 * (2**FLOOR(log2(segCount))).
// search_range, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, log2(searchRange/2)
// entry_selector, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16, (2 * segCount) - searchRange.
// range_shift, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// log.Printf("seg_count %v", seg_count)
f.cmap_entry_array = make([]cmap_entry_t, seg_count)
// uint16 * seg_count, Ending character code for each segment,
// last = 0xFFFF.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].end_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("end_code %v", f.cmap_entry_array[i].end_code)
}
// uint16, This value should be zero.
// reserved_pad, offset := octets_to_u16(font.cmap, offset), offset+2
offset = offset + 2
// uint16 * seg_count, Starting character code for each segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].start_code, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("start_code %v", f.cmap_entry_array[i].start_code)
}
// uint16 * seg_count, Delta for all character codes in segment.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_delta, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_delta %v", f.cmap_entry_array[i].id_delta)
}
// uint16 * seg_count, Offset in bytes to glyph indexArray, or 0.
for i := 0; i < seg_count; i++ {
f.cmap_entry_array[i].id_range_offset, offset =
uint32(octets_to_u16(f.cmap, offset)), offset+2
// log.Printf("id_range_offset %v", f.cmap_entry_array[i].id_range_offset)
}
// uint16 * seg_count, Glyph index array.
f.cmap_index_array = f.cmap[offset:]
return nil
} else if cmap_format_version == 12 {
// Format 12.0 is a bit like format 4, in that it defines segments for
// sparse representation in 4-byte character space.
// So, the next two bytes is part of version segment and should be 0.
expect_zero, offset := octets_to_u16(f.cmap, offset), offset+2
if expect_zero != 0 {
msg := fmt.Sprintf("UNSUPPORT or INVALID: cmap format version %x",
f.cmap[offset-4:offset])
return errors.New(msg)
}
// uint32, Byte length of this subtable (including the header).
// length, offset := octets_to_u32(font.cmap, offset), offset+4
offset = offset + 4
// uint32, 0 if don't care.
// lang, offset := octets_to_u32(font.cmap, offset), offset+4
offset = offset + 4
// uint32, Number of groupings which follow.
group_num, offset := octets_to_u32(f.cmap, offset), offset+4
// log.Printf("group_num %v", group_num)
// Here follow the individual groups.
// Allocate entries for this subtable; the format-4 branch's make() never ran here.
f.cmap_entry_array = make([]cmap_entry_t, group_num)
for i := uint32(0); i < group_num; i++ {
// uint32, First character code in this group.
f.cmap_entry_array[i].start_code, offset =
octets_to_u32(f.cmap, offset), offset+4
// uint32, Last character code in this group.
f.cmap_entry_array[i].end_code, offset =
octets_to_u32(f.cmap, offset), offset+4
// uint32, Glyph index corresponding to the starting character code.
f.cmap_entry_array[i].id_delta, offset =
octets_to_u32(f.cmap, offset), offset+4
}
return nil
} else {
msg := fmt.Sprintf("UNSUPPORT: cmap format version %v",
cmap_format_version)
return errors.New(msg)
}
}
const (
kLocaOffsetFormatUnknown int = iota
kLocaOffsetFormatShort
k | parse_cmap | identifier_name |
main.rs | ()?;
let mut linkages = HashMap::new();
let results = app.get_docs().process(&self.spec)?; // note: avoid name clash with db table
let mut first = true;
for doc in results {
if first {
first = false;
} else {
tcprintln!(app.ps, (""));
}
tcprintln!(app.ps, [hl: "Name:"], (" "), [green: "{}", doc.name]);
tcprintln!(app.ps, [hl: "MIME-type:"], (" {}", doc.mime_type));
tcprintln!(app.ps, [hl: "Size:"], (" {}", doc.human_size().unwrap_or_else(|| "N/A".to_owned())));
tcprintln!(app.ps, [hl: "Modified:"], (" {}", doc.utc_mod_time().to_rfc3339()));
tcprintln!(app.ps, [hl: "ID:"], (" {}", doc.id));
tcprintln!(app.ps, [hl: "Starred?:"], (" {}", if doc.starred { "yes" } else { "no" }));
tcprintln!(app.ps, [hl: "Trashed?:"], (" {}", if doc.trashed { "yes" } else { "no" }));
let accounts = doc.accounts(app)?;
let mut path_reprs = Vec::new();
for acct in &accounts {
if let Entry::Vacant(e) = linkages.entry(acct.id) {
let table = app.load_linkage_table(acct.id, true)?;
e.insert(table);
}
let link_table = linkages.get(&acct.id).unwrap();
for p in link_table.find_parent_paths(&doc.id).iter().map(|id_path| {
// This is not efficient, and it's panicky, but meh.
let names: Vec<_> = id_path
.iter()
.map(|docid| {
use schema::docs::dsl::*;
let elem = docs
.filter(id.eq(&docid))
.first::<database::Doc>(&app.conn)
.unwrap();
elem.name
})
.collect();
names.join(" > ")
}) {
path_reprs.push(format!("{}: {}", acct.email, p));
} | _n => {
tcprintln!(app.ps, [hl: "Paths::"]);
for p in path_reprs {
tcprintln!(app.ps, (" {}", p));
}
}
}
tcprintln!(app.ps, [hl: "Open-URL:"], (" {}", doc.open_url()));
}
Ok(0)
}
}
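// The Entry::Vacant dance above is a plain lazy-cache pattern: load each
// account's linkage table at most once, then reuse it. A self-contained sketch
// of the same idea, independent of the app types; the names here are
// illustrative stand-ins.
#[allow(dead_code)]
fn cached_load_sketch(
    cache: &mut std::collections::HashMap<i32, Vec<String>>,
    key: i32,
) -> &Vec<String> {
    use std::collections::hash_map::Entry;
    if let Entry::Vacant(e) = cache.entry(key) {
        // Stand-in for the expensive load_linkage_table call.
        e.insert(vec![format!("table for {}", key)]);
    }
    cache.get(&key).unwrap()
}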
/// List documents.
#[derive(Debug, StructOpt)]
pub struct DrorgListOptions {
#[structopt(help = "A document specifier (name, ID, ...)", required_unless = "all")]
spec: Option<String>,
#[structopt(
long = "all",
help = "List all documents in the database",
conflicts_with = "spec"
)]
all: bool,
}
impl DrorgListOptions {
fn cli(self, app: &mut Application) -> Result<i32> {
app.maybe_sync_all_accounts()?;
let results = if self.all {
app.get_docs().all()
} else {
app.get_docs().process(&self.spec.unwrap())
}?;
app.print_doc_list(results)?;
Ok(0)
}
}
/// The command-line action to add a login to the credentials DB.
///
/// Note that "email" doesn't really have to be an email address -- it can be
/// any random string; the user chooses which account to login-to
/// interactively during the login process. But I think it makes sense from a
/// UI perspective to just call it "email" and let the user figure out for
/// themselves that they can give it some other value if they feel like it.
#[derive(Debug, StructOpt)]
pub struct DrorgLoginOptions {}
impl DrorgLoginOptions {
/// The auth flow here will print out a message on the console, asking the
/// user to go to a URL, following instructions, and paste a string back
/// into the client.
///
/// We want to allow the user to login to multiple accounts
/// simultaneously. Therefore we set up the authenticator flow with a null
/// storage, and then add the resulting token to the disk storage.
fn cli(self, app: &mut Application) -> Result<i32> {
let mut account = accounts::Account::default();
// First we need to get authorization.
account.authorize_interactively(&app.secret)?;
// Now, for bookkeeping, we look up the email address associated with
// it. We could just have the user specify an identifier, but I went
// to the trouble to figure out how to do this right, so ...
let email_addr = account.fetch_email_address(&app.secret)?;
tcprintln!(app.ps, ("Successfully logged in to "), [hl: "{}", email_addr], ("."));
// We might need to add this account to the database. To have sensible
// foreign key relations, the email address is not the primary key of
// the accounts table, so we need to see whether there's already an
// existing row for this account (which could happen if the user
// re-logs-in, etc.) If we add a new row, we have to do this awkward
// bit where we insert and then immediately query for the row we just
// added (cf https://github.com/diesel-rs/diesel/issues/771 ).
{
use diesel::prelude::*;
use schema::accounts::dsl::*;
let maybe_row = accounts
.filter(email.eq(&email_addr))
.first::<database::Account>(&app.conn)
.optional()?;
let row_id = if let Some(row) = maybe_row {
row.id
} else {
let new_account = database::NewAccount::new(&email_addr);
diesel::replace_into(accounts)
.values(&new_account)
.execute(&app.conn)?;
let row = accounts
.filter(email.eq(&email_addr))
.first::<database::Account>(&app.conn)?;
row.id
};
account.data.db_id = row_id;
// JSON will be rewritten in acquire_change_page_token below.
}
// Initialize our token for checking for changes to the documents. We
// do this *before* scanning the complete listing; there's going to be
// a race condition either way, but the one that happens with this
// ordering seems like it would be more benign.
account.acquire_change_page_token(&app.secret)?;
// OK, now actually slurp in the list of documents.
tcprintln!(app.ps, ("Scanning documents ..."));
app.import_documents(&mut account)?;
// All done.
tcprintln!(app.ps, ("Done."));
Ok(0)
}
}
/// List the files in a folder.
///
/// TODO: this name is going to be super confusing compared to `list`.
#[derive(Debug, StructOpt)]
pub struct DrorgLsOptions {
#[structopt(help = "A folder specifier (name, ID, ...)")]
spec: String,
}
impl DrorgLsOptions {
fn cli(self, app: &mut Application) -> Result<i32> {
use std::collections::HashSet;
app.maybe_sync_all_accounts()?;
let doc = app.get_docs().process_one(self.spec)?;
// We *could* just proceed and see if there's anything that Drive
// thinks is a child of this doc, but it seems like the more sensible
// UX is to make this a hard failure. You could imagine adding a CLI
// option to override this behavior.
if !doc.is_folder() {
return Err(format_err!("the selected document is not a folder"));
}
// This is another operation which can be surprising when you think
// about the behavior when a doc belongs to more than one account. We
// find children for each account separately and merge the results.
let accounts = doc.accounts(app)?;
let mut child_ids = HashSet::new();
if accounts.len() > 1 {
tcreport!(app.ps, warning: "folder belongs to multiple accounts; \
their listings will be merged");
}
for acct in &accounts {
let table = app.load_linkage_table(acct.id, false)?;
let node = match table.nodes.get(&doc.id) {
Some(n) => *n,
None => continue,
};
for child_idx in table.graph.neighbors(node) {
child_ids.insert(table.graph[child_idx].clone());
}
}
// Is this the best ordering?
let mut docs = app.ids_to_docs(&child_ids);
docs.sort_by_key(|d| d.utc_mod_time());
docs.reverse();
app.print_doc_list(docs)?;
app.set_cwd(&doc)?;
Ok(0)
}
}
/// Open a document.
#[derive(Debug, StructOpt)]
pub struct DrorgOpenOptions {
#[structopt(help = "A document specifier (name, ID, ...)")]
spec: String,
}
impl DrorgOpenOptions {
fn cli(self, app: &mut Application | }
match path_reprs.len() {
0 => tcprintln!(app.ps, [hl: "Path:"], (" [none??]")),
1 => tcprintln!(app.ps, [hl: "Path:"], (" {}", path_reprs[0])), | random_line_split |
main.rs | ()?;
let mut linkages = HashMap::new();
let results = app.get_docs().process(&self.spec)?; // note: avoid name clash with db table
let mut first = true;
for doc in results {
if first {
first = false;
} else {
tcprintln!(app.ps, (""));
}
tcprintln!(app.ps, [hl: "Name:"], (" "), [green: "{}", doc.name]);
tcprintln!(app.ps, [hl: "MIME-type:"], (" {}", doc.mime_type));
tcprintln!(app.ps, [hl: "Size:"], (" {}", doc.human_size().unwrap_or_else(|| "N/A".to_owned())));
tcprintln!(app.ps, [hl: "Modified:"], (" {}", doc.utc_mod_time().to_rfc3339()));
tcprintln!(app.ps, [hl: "ID:"], (" {}", doc.id));
tcprintln!(app.ps, [hl: "Starred?:"], (" {}", if doc.starred { "yes" } else { "no" }));
tcprintln!(app.ps, [hl: "Trashed?:"], (" {}", if doc.trashed { "yes" } else { "no" }));
let accounts = doc.accounts(app)?;
let mut path_reprs = Vec::new();
for acct in &accounts {
if let Entry::Vacant(e) = linkages.entry(acct.id) {
let table = app.load_linkage_table(acct.id, true)?;
e.insert(table);
}
let link_table = linkages.get(&acct.id).unwrap();
for p in link_table.find_parent_paths(&doc.id).iter().map(|id_path| {
// This is not efficient, and it's panicky, but meh.
let names: Vec<_> = id_path
.iter()
.map(|docid| {
use schema::docs::dsl::*;
let elem = docs
.filter(id.eq(&docid))
.first::<database::Doc>(&app.conn)
.unwrap();
elem.name
})
.collect();
names.join(" > ")
}) {
path_reprs.push(format!("{}: {}", acct.email, p));
}
}
match path_reprs.len() {
0 => tcprintln!(app.ps, [hl: "Path:"], (" [none??]")),
1 => tcprintln!(app.ps, [hl: "Path:"], (" {}", path_reprs[0])),
_n => {
tcprintln!(app.ps, [hl: "Paths::"]);
for p in path_reprs {
tcprintln!(app.ps, (" {}", p));
}
}
}
tcprintln!(app.ps, [hl: "Open-URL:"], (" {}", doc.open_url()));
}
Ok(0)
}
}
/// List documents.
#[derive(Debug, StructOpt)]
pub struct DrorgListOptions {
#[structopt(help = "A document specifier (name, ID, ...)", required_unless = "all")]
spec: Option<String>,
#[structopt(
long = "all",
help = "List all documents in the database",
conflicts_with = "spec"
)]
all: bool,
}
impl DrorgListOptions {
fn cli(self, app: &mut Application) -> Result<i32> {
app.maybe_sync_all_accounts()?;
let results = if self.all {
app.get_docs().all()
} else {
app.get_docs().process(&self.spec.unwrap())
}?;
app.print_doc_list(results)?;
Ok(0)
}
}
/// The command-line action to add a login to the credentials DB.
///
/// Note that "email" doesn't really have to be an email address -- it can be
/// any random string; the user chooses which account to login-to
/// interactively during the login process. But I think it makes sense from a
/// UI perspective to just call it "email" and let the user figure out for
/// themselves that they can give it some other value if they feel like it.
#[derive(Debug, StructOpt)]
pub struct DrorgLoginOptions {}
impl DrorgLoginOptions {
/// The auth flow here will print out a message on the console, asking the
/// user to go to a URL, following instructions, and paste a string back
/// into the client.
///
/// We want to allow the user to login to multiple accounts
/// simultaneously. Therefore we set up the authenticator flow with a null
/// storage, and then add the resulting token to the disk storage.
fn cli(self, app: &mut Application) -> Result<i32> {
let mut account = accounts::Account::default();
// First we need to get authorization.
account.authorize_interactively(&app.secret)?;
// Now, for bookkeeping, we look up the email address associated with
// it. We could just have the user specify an identifier, but I went
// to the trouble to figure out how to do this right, so ...
let email_addr = account.fetch_email_address(&app.secret)?;
tcprintln!(app.ps, ("Successfully logged in to "), [hl: "{}", email_addr], ("."));
// We might need to add this account to the database. To have sensible
// foreign key relations, the email address is not the primary key of
// the accounts table, so we need to see whether there's already an
// existing row for this account (which could happen if the user
// re-logs-in, etc.) If we add a new row, we have to do this awkward
// bit where we insert and then immediately query for the row we just
// added (cf https://github.com/diesel-rs/diesel/issues/771 ).
{
use diesel::prelude::*;
use schema::accounts::dsl::*;
let maybe_row = accounts
.filter(email.eq(&email_addr))
.first::<database::Account>(&app.conn)
.optional()?;
let row_id = if let Some(row) = maybe_row {
row.id
} else {
let new_account = database::NewAccount::new(&email_addr);
diesel::replace_into(accounts)
.values(&new_account)
.execute(&app.conn)?;
let row = accounts
.filter(email.eq(&email_addr))
.first::<database::Account>(&app.conn)?;
row.id
};
account.data.db_id = row_id;
// JSON will be rewritten in acquire_change_page_token below.
}
// Initialize our token for checking for changes to the documents. We
// do this *before* scanning the complete listing; there's going to be
// a race condition either way, but the one that happens with this
// ordering seems like it would be more benign.
account.acquire_change_page_token(&app.secret)?;
// OK, now actually slurp in the list of documents.
tcprintln!(app.ps, ("Scanning documents ..."));
app.import_documents(&mut account)?;
// All done.
tcprintln!(app.ps, ("Done."));
Ok(0)
}
}
/// List the files in a folder.
///
/// TODO: this name is going to be super confusing compared to `list`.
#[derive(Debug, StructOpt)]
pub struct DrorgLsOptions {
#[structopt(help = "A folder specifier (name, ID, ...)")]
spec: String,
}
impl DrorgLsOptions {
fn cli(self, app: &mut Application) -> Result<i32> {
use std::collections::HashSet;
app.maybe_sync_all_accounts()?;
let doc = app.get_docs().process_one(self.spec)?;
// We *could* just proceed and see if there's anything that Drive
// thinks is a child of this doc, but it seems like the more sensible
// UX is to make this a hard failure. You could imagine adding a CLI
// option to override this behavior.
if !doc.is_folder() {
return Err(format_err!("the selected document is not a folder"));
}
// This is another operation which can be surprising when you think
// about the behavior when a doc belongs to more than one account. We
// find children for each account separately and merge the results.
let accounts = doc.accounts(app)?;
let mut child_ids = HashSet::new();
if accounts.len() > 1 {
tcreport!(app.ps, warning: "folder belongs to multiple accounts; \
their listings will be merged");
}
for acct in &accounts {
let table = app.load_linkage_table(acct.id, false)?;
let node = match table.nodes.get(&doc.id) {
Some(n) => *n,
None => continue,
};
for child_idx in table.graph.neighbors(node) {
child_ids.insert(table.graph[child_idx].clone());
}
}
// Is this the best ordering?
let mut docs = app.ids_to_docs(&child_ids);
docs.sort_by_key(|d| d.utc_mod_time());
docs.reverse();
app.print_doc_list(docs)?;
app.set_cwd(&doc)?;
Ok(0)
}
}
/// Open a document.
#[derive(Debug, StructOpt)]
pub struct DrorgOpenOptions {
#[structopt(help = "A document specifier (name, ID, ...)")]
spec: String,
}
impl DrorgOpenOptions {
fn | (self, app: &mut | cli | identifier_name |
finality.rs | blank finality checker under the given validator set.
pub fn blank(signers: Vec<PublicKey>) -> Self {
RollingFinality {
headers: VecDeque::new(),
signers: SimpleList::new(signers),
sign_count: HashMap::new(),
last_pushed: None,
}
}
pub fn add_signer(&mut self, signer: PublicKey) {
self.signers.add(signer)
}
pub fn remove_signer(&mut self, signer: &u64) {
self.signers.remove_by_id(signer)
}
/// Extract unfinalized subchain from ancestry iterator.
/// Clears the current subchain.
///
/// Fails if any provided signature isn't part of the signers set.
pub fn build_ancestry_subchain<I>(&mut self, iterable: I) -> Result<(), UnknownValidator>
where I: IntoIterator<Item=(H256, Vec<u64>)>
{
self.clear();
for (hash, signers) in iterable {
self.check_signers(&signers)?;
if self.last_pushed.is_none() { self.last_pushed = Some(hash.clone()) }
// break when we've got our first finalized block.
{
let current_signed = self.sign_count.len();
let new_signers = signers.iter().filter(|s| !self.sign_count.contains_key(s)).count();
let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len();
if would_be_finalized {
trace!(target: "finality", "Encountered already finalized block {:?}", hash.clone());
break
}
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
}
self.headers.push_front((hash, signers));
}
trace!(target: "finality", "Rolling finality state: {:?}", self.headers);
Ok(())
}
/// Clear the finality status, but keeps the validator set.
pub fn clear(&mut self) {
self.headers.clear();
self.sign_count.clear();
self.last_pushed = None;
}
/// Returns the last pushed hash.
pub fn subchain_head(&self) -> Option<H256> {
self.last_pushed.clone()
}
/// Get an iterator over stored hashes in order.
#[cfg(test)]
pub fn unfinalized_hashes(&self) -> impl Iterator<Item=&H256> {
self.headers.iter().map(|(h, _)| h)
}
pub fn save(&self, file_name: &str) -> Result<(), std::io::Error> {
let mut file_info = File::create(file_name)?;
let data = self.serialize_info();
file_info.write_all(&data)?;
file_info.flush()?;
Ok(())
}
pub fn load(&mut self, file_name: &str) -> Result<(), std::io::Error> {
if Path::new(file_name).exists() {
let mut file_info = File::open(file_name)?;
let mut data = Vec::new();
file_info.read_to_end(&mut data)?;
self.deserialize_info(data)?;
}
Ok(())
}
/// serialize block hashes info
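/// Byte layout (little-endian throughout; a descriptive sketch of the writes below):
///   [u32 n_signs]   then n_signs   x ( [u64 signer_id] [u64 count] )
///   [u32 n_headers] then n_headers x ( [32-byte hash] [u32 n_keys] then n_keys x [u64 key] )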
pub fn serialize_info(&self) -> Vec<u8> {
let mut buf = Vec::new();
//serialize sign_count
let len = self.sign_count.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for (sign, count) in self.sign_count.iter() {
buf.extend_from_slice(&(*sign as u64).to_le_bytes());
buf.extend_from_slice(&(*count as u64).to_le_bytes());
}
//serialize headers
let len = self.headers.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for h in self.headers.iter() {
let (hash, validators) = h.clone();
buf.append(&mut hash.0.to_vec());
let keys_count = validators.len();
buf.extend_from_slice(&(keys_count as u32).to_le_bytes());
for v in validators.iter() {
buf.extend_from_slice(&(*v as u64).to_le_bytes());
}
}
buf
}
/// deserialize block hashes info
pub fn deserialize_info(&mut self, data: Vec<u8>) -> Result<(), std::io::Error> {
let mut rdr = Cursor::new(data);
// deserialize sign_count
let len = rdr.read_le_u32()?;
for _ in 0..len {
let sign = rdr.read_le_u64()?;
let count = rdr.read_le_u64()? as usize;
self.sign_count.insert(sign, count);
}
// deserialize headers
let len = rdr.read_le_u32()?;
for _ in 0..len {
let hash = rdr.read_u256()?;
let keys_count = rdr.read_le_u32()?;
let mut keys: Vec<u64> = vec![];
for _ in 0..keys_count {
keys.push(rdr.read_le_u64()?);
}
self.headers.push_back((H256(hash), keys));
}
Ok(())
}
/// Get the validator set.
pub fn validators(&self) -> &SimpleList { &self.signers }
/// Remove last validator from list
pub fn remove_last(&mut self) -> Option<(H256, Vec<u64>)> |
/// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent)
///
/// Fails if `signer` isn't a member of the active validator set.
/// Returns a list of all newly finalized headers.
// TODO: optimize with smallvec.
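// Worked example (illustrative): with 5 validators, pushing hashes A, B, C
// signed by distinct validators v1, v2, v3 finalizes A on the third push:
// sign_count then holds 3 signers and 3 * 2 > 5, i.e. a strict majority has
// signed at or above A, so the loop below pops it off the front of `headers`.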
pub fn push_hash(&mut self, head: H256, signers: Vec<u64>) -> Result<Vec<H256>, UnknownValidator> {
self.check_signers(&signers)?;
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
self.headers.push_back((head.clone(), signers));
let mut newly_finalized = Vec::new();
while self.sign_count.len() * 2 > self.signers.len() {
let (hash, signers) = self.headers.pop_front()
.expect("headers length always greater than sign count length; qed");
newly_finalized.push(hash);
for signer in signers {
match self.sign_count.entry(signer) {
Entry::Occupied(mut entry) => {
// decrement count for this signer and purge on zero.
*entry.get_mut() -= 1;
if *entry.get() == 0 {
entry.remove();
}
}
Entry::Vacant(_) => panic!("all hashes in `headers` should have entries in `sign_count` for their signers; qed"),
}
}
}
trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized);
self.last_pushed = Some(head);
Ok(newly_finalized)
}
fn check_signers(&self, signers: &Vec<u64>) -> Result<(), UnknownValidator> {
for s in signers.iter() {
if !self.signers.contains_id(s) {
return Err(UnknownValidator)
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use ed25519_dalek::PublicKey;
use std::fs;
use std::path::Path;
use ton_block::id_from_key;
use super::RollingFinality;
use engines::authority_round::subst::{H256};
#[test]
fn test_serialization() {
let vec = (0..7).map(|_| {
let pvt_key = ed25519_dalek::SecretKey::generate(&mut rand::thread_rng());
ed25519_dalek::PublicKey::from(&pvt_key)
}).collect::<Vec<ed25519_dalek::PublicKey>>();
let mut bytes = [0u8; 8];
bytes.copy_from_slice(&vec[0].as_bytes()[0..8]);
let v1 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[1].as_bytes()[0..8]);
let v2 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[2].as_bytes()[0..8]);
let v3 = u64::from_be_bytes(bytes);
let mut rf = RollingFinality::blank(vec);
rf.push_hash(H256([0;32]), vec![v1]).unwrap();
rf.push_hash(H256([1;32]), vec![v2]).unwrap();
rf.push_hash(H256([2;32]), vec![v1]).unwrap();
rf.push_hash(H256([4;32]), vec![v3]).unwrap();
rf.push_hash(H256([5;32]), vec![v3]).unwrap();
let data = rf.serialize_info();
println!("{:?}", data);
let mut rf2 = RollingFinality::blank(vec![]);
| {
self.headers.pop_back()
} | identifier_body |
finality.rs | ers set.
pub fn build_ancestry_subchain<I>(&mut self, iterable: I) -> Result<(), UnknownValidator>
where I: IntoIterator<Item=(H256, Vec<u64>)>
{
self.clear();
for (hash, signers) in iterable {
self.check_signers(&signers)?;
if self.last_pushed.is_none() { self.last_pushed = Some(hash.clone()) }
// break when we've got our first finalized block.
{
let current_signed = self.sign_count.len();
let new_signers = signers.iter().filter(|s| !self.sign_count.contains_key(s)).count();
let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len();
if would_be_finalized {
trace!(target: "finality", "Encountered already finalized block {:?}", hash.clone());
break
}
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
}
self.headers.push_front((hash, signers));
}
trace!(target: "finality", "Rolling finality state: {:?}", self.headers);
Ok(())
}
/// Clears the finality status but keeps the validator set.
pub fn clear(&mut self) {
self.headers.clear();
self.sign_count.clear();
self.last_pushed = None;
}
/// Returns the last pushed hash.
pub fn subchain_head(&self) -> Option<H256> {
self.last_pushed.clone()
}
/// Get an iterator over stored hashes in order.
#[cfg(test)]
pub fn unfinalized_hashes(&self) -> impl Iterator<Item=&H256> {
self.headers.iter().map(|(h, _)| h)
}
pub fn save(&self, file_name: &str) -> Result<(), std::io::Error> {
let mut file_info = File::create(file_name)?;
let data = self.serialize_info();
file_info.write_all(&data)?;
file_info.flush()?;
Ok(())
}
pub fn load(&mut self, file_name: &str) -> Result<(), std::io::Error> {
if Path::new(file_name).exists() {
let mut file_info = File::open(file_name)?;
let mut data = Vec::new();
file_info.read_to_end(&mut data)?;
self.deserialize_info(data)?;
}
Ok(())
}
/// serialize block hashes info
pub fn serialize_info(&self) -> Vec<u8> {
let mut buf = Vec::new();
//serialize sign_count
let len = self.sign_count.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for (sign, count) in self.sign_count.iter() {
buf.extend_from_slice(&(*sign as u64).to_le_bytes());
buf.extend_from_slice(&(*count as u64).to_le_bytes());
}
//serialize headers
let len = self.headers.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for h in self.headers.iter() {
let (hash, validators) = h.clone();
buf.append(&mut hash.0.to_vec());
let keys_count = validators.len();
buf.extend_from_slice(&(keys_count as u32).to_le_bytes());
for v in validators.iter() {
buf.extend_from_slice(&(*v as u64).to_le_bytes());
}
}
buf
}
/// deserialize block hashes info
pub fn deserialize_info(&mut self, data: Vec<u8>) -> Result<(), std::io::Error> {
let mut rdr = Cursor::new(data);
// deserialize sign_count
let len = rdr.read_le_u32()?;
for _ in 0..len {
let sign = rdr.read_le_u64()?;
let count = rdr.read_le_u64()? as usize;
self.sign_count.insert(sign, count);
}
// deserialize headers
let len = rdr.read_le_u32()?;
for _ in 0..len {
let hash = rdr.read_u256()?;
let keys_count = rdr.read_le_u32()?;
let mut keys: Vec<u64> = vec![];
for _ in 0..keys_count {
keys.push(rdr.read_le_u64()?);
}
self.headers.push_back((H256(hash), keys));
}
Ok(())
}
/// Get the validator set.
pub fn validators(&self) -> &SimpleList { &self.signers }
/// Remove last validator from list
pub fn remove_last(&mut self) -> Option<(H256, Vec<u64>)> {
self.headers.pop_back()
}
/// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent)
///
/// Fails if `signer` isn't a member of the active validator set.
/// Returns a list of all newly finalized headers.
// TODO: optimize with smallvec.
pub fn push_hash(&mut self, head: H256, signers: Vec<u64>) -> Result<Vec<H256>, UnknownValidator> {
self.check_signers(&signers)?;
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
self.headers.push_back((head.clone(), signers));
let mut newly_finalized = Vec::new();
while self.sign_count.len() * 2 > self.signers.len() {
let (hash, signers) = self.headers.pop_front()
.expect("headers length always greater than sign count length; qed");
newly_finalized.push(hash);
for signer in signers {
match self.sign_count.entry(signer) {
Entry::Occupied(mut entry) => {
// decrement count for this signer and purge on zero.
*entry.get_mut() -= 1;
if *entry.get() == 0 {
entry.remove();
}
}
Entry::Vacant(_) => panic!("all hashes in `headers` should have entries in `sign_count` for their signers; qed"),
}
}
}
trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized);
self.last_pushed = Some(head);
Ok(newly_finalized)
}
fn check_signers(&self, signers: &Vec<u64>) -> Result<(), UnknownValidator> {
for s in signers.iter() {
if !self.signers.contains_id(s) {
return Err(UnknownValidator)
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use ed25519_dalek::PublicKey;
use std::fs;
use std::path::Path;
use ton_block::id_from_key;
use super::RollingFinality;
use engines::authority_round::subst::{H256};
#[test]
fn test_serialization() {
let vec = (0..7).map(|_| {
let pvt_key = ed25519_dalek::SecretKey::generate(&mut rand::thread_rng());
ed25519_dalek::PublicKey::from(&pvt_key)
}).collect::<Vec<ed25519_dalek::PublicKey>>();
let mut bytes = [0u8; 8];
bytes.copy_from_slice(&vec[0].as_bytes()[0..8]);
let v1 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[1].as_bytes()[0..8]);
let v2 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[2].as_bytes()[0..8]);
let v3 = u64::from_be_bytes(bytes);
let mut rf = RollingFinality::blank(vec);
rf.push_hash(H256([0;32]), vec![v1]).unwrap();
rf.push_hash(H256([1;32]), vec![v2]).unwrap();
rf.push_hash(H256([2;32]), vec![v1]).unwrap();
rf.push_hash(H256([4;32]), vec![v3]).unwrap();
rf.push_hash(H256([5;32]), vec![v3]).unwrap();
let data = rf.serialize_info();
println!("{:?}", data);
let mut rf2 = RollingFinality::blank(vec![]);
rf2.deserialize_info(data).unwrap();
assert_eq!(rf.headers, rf2.headers);
}
fn get_keys(n: usize) -> (Vec<PublicKey>, Vec<u64>) {
let mut keys = Vec::new();
let mut kids = Vec::new();
for i in 0..n {
let name = format!("../config/pub{:02}", i+1);
let data = fs::read(Path::new(&name))
.expect(&format!("Error reading key file {}", name));
let key = PublicKey::from_bytes(&data).unwrap();
kids.push(id_from_key(&key));
keys.push(key);
}
(keys, kids)
}
#[test]
fn | rejects_unknown_signers | identifier_name |
|
finality.rs | RollingFinality {
headers: VecDeque::new(),
signers: SimpleList::new(signers),
sign_count: HashMap::new(),
last_pushed: None,
}
}
pub fn add_signer(&mut self, signer: PublicKey) {
self.signers.add(signer)
}
pub fn remove_signer(&mut self, signer: &u64) {
self.signers.remove_by_id(signer)
}
/// Extract unfinalized subchain from ancestry iterator.
/// Clears the current subchain.
///
/// Fails if any provided signature isn't part of the signers set.
pub fn build_ancestry_subchain<I>(&mut self, iterable: I) -> Result<(), UnknownValidator>
where I: IntoIterator<Item=(H256, Vec<u64>)>
{
self.clear();
for (hash, signers) in iterable {
self.check_signers(&signers)?;
if self.last_pushed.is_none() { self.last_pushed = Some(hash.clone()) }
// break when we've got our first finalized block.
{
let current_signed = self.sign_count.len();
let new_signers = signers.iter().filter(|s| !self.sign_count.contains_key(s)).count();
let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len();
if would_be_finalized {
trace!(target: "finality", "Encountered already finalized block {:?}", hash.clone());
break
}
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
}
self.headers.push_front((hash, signers));
}
trace!(target: "finality", "Rolling finality state: {:?}", self.headers);
Ok(())
}
/// Clears the finality status but keeps the validator set.
pub fn clear(&mut self) {
self.headers.clear();
self.sign_count.clear();
self.last_pushed = None;
}
/// Returns the last pushed hash.
pub fn subchain_head(&self) -> Option<H256> {
self.last_pushed.clone()
}
/// Get an iterator over stored hashes in order.
#[cfg(test)]
pub fn unfinalized_hashes(&self) -> impl Iterator<Item=&H256> {
self.headers.iter().map(|(h, _)| h)
}
pub fn save(&self, file_name: &str) -> Result<(), std::io::Error> {
let mut file_info = File::create(file_name)?;
let data = self.serialize_info();
file_info.write_all(&data)?;
file_info.flush()?;
Ok(())
}
pub fn load(&mut self, file_name: &str) -> Result<(), std::io::Error> {
if Path::new(file_name).exists() {
let mut file_info = File::open(file_name)?;
let mut data = Vec::new();
file_info.read_to_end(&mut data)?;
self.deserialize_info(data)?;
}
Ok(())
}
/// serialize block hashes info
pub fn serialize_info(&self) -> Vec<u8> {
let mut buf = Vec::new();
//serialize sign_count
let len = self.sign_count.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for (sign, count) in self.sign_count.iter() {
buf.extend_from_slice(&(*sign as u64).to_le_bytes());
buf.extend_from_slice(&(*count as u64).to_le_bytes());
}
//serialize headers
let len = self.headers.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for h in self.headers.iter() {
let (hash, validators) = h.clone();
buf.append(&mut hash.0.to_vec());
let keys_count = validators.len();
buf.extend_from_slice(&(keys_count as u32).to_le_bytes());
for v in validators.iter() {
buf.extend_from_slice(&(*v as u64).to_le_bytes());
}
}
buf
}
/// deserialize block hashes info
pub fn deserialize_info(&mut self, data: Vec<u8>) -> Result<(), std::io::Error> {
let mut rdr = Cursor::new(data);
// deserialize sign_count
let len = rdr.read_le_u32()?;
for _ in 0..len {
let sign = rdr.read_le_u64()?;
let count = rdr.read_le_u64()? as usize;
self.sign_count.insert(sign, count);
}
// deserialize headers
let len = rdr.read_le_u32()?;
for _ in 0..len {
let hash = rdr.read_u256()?;
let keys_count = rdr.read_le_u32()?;
let mut keys: Vec<u64> = vec![];
for _ in 0..keys_count {
keys.push(rdr.read_le_u64()?);
}
self.headers.push_back((H256(hash), keys));
}
Ok(())
}
/// Get the validator set.
pub fn validators(&self) -> &SimpleList { &self.signers }
/// Remove last validator from list
pub fn remove_last(&mut self) -> Option<(H256, Vec<u64>)> {
self.headers.pop_back()
}
/// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent)
///
/// Fails if `signer` isn't a member of the active validator set.
/// Returns a list of all newly finalized headers.
// TODO: optimize with smallvec.
pub fn push_hash(&mut self, head: H256, signers: Vec<u64>) -> Result<Vec<H256>, UnknownValidator> {
self.check_signers(&signers)?;
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
self.headers.push_back((head.clone(), signers));
let mut newly_finalized = Vec::new();
while self.sign_count.len() * 2 > self.signers.len() {
let (hash, signers) = self.headers.pop_front()
.expect("headers length always greater than sign count length; qed");
newly_finalized.push(hash);
for signer in signers {
match self.sign_count.entry(signer) {
Entry::Occupied(mut entry) => {
// decrement count for this signer and purge on zero.
*entry.get_mut() -= 1;
if *entry.get() == 0 {
entry.remove();
}
}
Entry::Vacant(_) => panic!("all hashes in `headers` should have entries in `sign_count` for their signers; qed"),
}
}
}
trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized);
self.last_pushed = Some(head);
Ok(newly_finalized)
}
fn check_signers(&self, signers: &Vec<u64>) -> Result<(), UnknownValidator> {
for s in signers.iter() {
if !self.signers.contains_id(s) {
return Err(UnknownValidator)
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use ed25519_dalek::PublicKey;
use std::fs;
use std::path::Path;
use ton_block::id_from_key;
use super::RollingFinality;
use engines::authority_round::subst::{H256};
#[test]
fn test_serialization() {
let vec = (0..7).map(|_| {
let pvt_key = ed25519_dalek::SecretKey::generate(&mut rand::thread_rng());
ed25519_dalek::PublicKey::from(&pvt_key)
}).collect::<Vec<ed25519_dalek::PublicKey>>();
let mut bytes = [0u8; 8];
bytes.copy_from_slice(&vec[0].as_bytes()[0..8]);
let v1 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[1].as_bytes()[0..8]);
let v2 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[2].as_bytes()[0..8]);
let v3 = u64::from_be_bytes(bytes);
let mut rf = RollingFinality::blank(vec);
rf.push_hash(H256([0;32]), vec![v1]).unwrap();
rf.push_hash(H256([1;32]), vec![v2]).unwrap();
rf.push_hash(H256([2;32]), vec![v1]).unwrap();
rf.push_hash(H256([4;32]), vec![v3]).unwrap();
rf.push_hash(H256([5;32]), vec![v3]).unwrap();
let data = rf.serialize_info();
println!("{:?}", data);
let mut rf2 = RollingFinality::blank(vec! | /// Create a blank finality checker under the given validator set.
pub fn blank(signers: Vec<PublicKey>) -> Self { | random_line_split |
|
finality.rs | blank finality checker under the given validator set.
pub fn blank(signers: Vec<PublicKey>) -> Self {
RollingFinality {
headers: VecDeque::new(),
signers: SimpleList::new(signers),
sign_count: HashMap::new(),
last_pushed: None,
}
}
pub fn add_signer(&mut self, signer: PublicKey) {
self.signers.add(signer)
}
pub fn remove_signer(&mut self, signer: &u64) {
self.signers.remove_by_id(signer)
}
/// Extract unfinalized subchain from ancestry iterator.
/// Clears the current subchain.
///
/// Fails if any provided signature isn't part of the signers set.
pub fn build_ancestry_subchain<I>(&mut self, iterable: I) -> Result<(), UnknownValidator>
where I: IntoIterator<Item=(H256, Vec<u64>)>
{
self.clear();
for (hash, signers) in iterable {
self.check_signers(&signers)?;
if self.last_pushed.is_none() { self.last_pushed = Some(hash.clone()) }
// break when we've got our first finalized block.
{
let current_signed = self.sign_count.len();
let new_signers = signers.iter().filter(|s| !self.sign_count.contains_key(s)).count();
let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len();
if would_be_finalized |
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
}
self.headers.push_front((hash, signers));
}
trace!(target: "finality", "Rolling finality state: {:?}", self.headers);
Ok(())
}
/// Clears the finality status but keeps the validator set.
pub fn clear(&mut self) {
self.headers.clear();
self.sign_count.clear();
self.last_pushed = None;
}
/// Returns the last pushed hash.
pub fn subchain_head(&self) -> Option<H256> {
self.last_pushed.clone()
}
/// Get an iterator over stored hashes in order.
#[cfg(test)]
pub fn unfinalized_hashes(&self) -> impl Iterator<Item=&H256> {
self.headers.iter().map(|(h, _)| h)
}
pub fn save(&self, file_name: &str) -> Result<(), std::io::Error> {
let mut file_info = File::create(file_name)?;
let data = self.serialize_info();
file_info.write_all(&data)?;
file_info.flush()?;
Ok(())
}
pub fn load(&mut self, file_name: &str) -> Result<(), std::io::Error> {
if Path::new(file_name).exists() {
let mut file_info = File::open(file_name)?;
let mut data = Vec::new();
file_info.read_to_end(&mut data)?;
self.deserialize_info(data)?;
}
Ok(())
}
/// serialize block hashes info
pub fn serialize_info(&self) -> Vec<u8> {
let mut buf = Vec::new();
//serialize sign_count
let len = self.sign_count.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for (sign, count) in self.sign_count.iter() {
buf.extend_from_slice(&(*sign as u64).to_le_bytes());
buf.extend_from_slice(&(*count as u64).to_le_bytes());
}
//serialize headers
let len = self.headers.len();
buf.extend_from_slice(&(len as u32).to_le_bytes());
for h in self.headers.iter() {
let (hash, validators) = h.clone();
buf.append(&mut hash.0.to_vec());
let keys_count = validators.len();
buf.extend_from_slice(&(keys_count as u32).to_le_bytes());
for v in validators.iter() {
buf.extend_from_slice(&(*v as u64).to_le_bytes());
}
}
buf
}
/// deserialize block hashes info
pub fn deserialize_info(&mut self, data: Vec<u8>) -> Result<(), std::io::Error> {
let mut rdr = Cursor::new(data);
// deserialize sign_count
let len = rdr.read_le_u32()?;
for _ in 0..len {
let sign = rdr.read_le_u64()?;
let count = rdr.read_le_u64()? as usize;
self.sign_count.insert(sign, count);
}
// deserialize headers
let len = rdr.read_le_u32()?;
for _ in 0..len {
let hash = rdr.read_u256()?;
let keys_count = rdr.read_le_u32()?;
let mut keys: Vec<u64> = vec![];
for _ in 0..keys_count {
keys.push(rdr.read_le_u64()?);
}
self.headers.push_back((H256(hash), keys));
}
Ok(())
}
/// Get the validator set.
pub fn validators(&self) -> &SimpleList { &self.signers }
/// Remove last validator from list
pub fn remove_last(&mut self) -> Option<(H256, Vec<u64>)> {
self.headers.pop_back()
}
/// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent)
///
/// Fails if `signer` isn't a member of the active validator set.
/// Returns a list of all newly finalized headers.
// TODO: optimize with smallvec.
pub fn push_hash(&mut self, head: H256, signers: Vec<u64>) -> Result<Vec<H256>, UnknownValidator> {
self.check_signers(&signers)?;
for signer in signers.iter() {
*self.sign_count.entry(signer.clone()).or_insert(0) += 1;
}
self.headers.push_back((head.clone(), signers));
let mut newly_finalized = Vec::new();
while self.sign_count.len() * 2 > self.signers.len() {
let (hash, signers) = self.headers.pop_front()
.expect("headers length always greater than sign count length; qed");
newly_finalized.push(hash);
for signer in signers {
match self.sign_count.entry(signer) {
Entry::Occupied(mut entry) => {
// decrement count for this signer and purge on zero.
*entry.get_mut() -= 1;
if *entry.get() == 0 {
entry.remove();
}
}
Entry::Vacant(_) => panic!("all hashes in `headers` should have entries in `sign_count` for their signers; qed"),
}
}
}
trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized);
self.last_pushed = Some(head);
Ok(newly_finalized)
}
fn check_signers(&self, signers: &Vec<u64>) -> Result<(), UnknownValidator> {
for s in signers.iter() {
if !self.signers.contains_id(s) {
return Err(UnknownValidator)
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use ed25519_dalek::PublicKey;
use std::fs;
use std::path::Path;
use ton_block::id_from_key;
use super::RollingFinality;
use engines::authority_round::subst::{H256};
#[test]
fn test_serialization() {
let vec = (0..7).map(|_| {
let pvt_key = ed25519_dalek::SecretKey::generate(&mut rand::thread_rng());
ed25519_dalek::PublicKey::from(&pvt_key)
}).collect::<Vec<ed25519_dalek::PublicKey>>();
let mut bytes = [0u8; 8];
bytes.copy_from_slice(&vec[0].as_bytes()[0..8]);
let v1 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[1].as_bytes()[0..8]);
let v2 = u64::from_be_bytes(bytes);
bytes.copy_from_slice(&vec[2].as_bytes()[0..8]);
let v3 = u64::from_be_bytes(bytes);
let mut rf = RollingFinality::blank(vec);
rf.push_hash(H256([0;32]), vec![v1]).unwrap();
rf.push_hash(H256([1;32]), vec![v2]).unwrap();
rf.push_hash(H256([2;32]), vec![v1]).unwrap();
rf.push_hash(H256([4;32]), vec![v3]).unwrap();
rf.push_hash(H256([5;32]), vec![v3]).unwrap();
let data = rf.serialize_info();
println!("{:?}", data);
let mut rf2 = RollingFinality::blank(vec![]);
| {
trace!(target: "finality", "Encountered already finalized block {:?}", hash.clone());
break
} | conditional_block |
installerTools.js | the given
// searchKey. Start in the directory given in path and
// search each sub directory.
// All matches are added to the array listPaths.
var listFiles = fs.readdirSync(path);
for (var indx =0; indx < listFiles.length; indx++) {
var fileName = listFiles[indx];
// don't include .xxx files
if (fileName.indexOf(".") == 0) {
continue;
}
var filePath = path + '/' + fileName;
// don't repeat files already added
if (listPaths.indexOf(filePath) > -1) {
continue;
}
// if this file matches the search pattern,
if (fileName.indexOf(searchKey) > -1) {
// add to listPaths
listPaths.push(filePath );
} else {
// if this is a directory
var stats = fs.statSync(filePath);
if (stats.isDirectory()) {
// scan this directory too
recursiveFileScan( filePath, listPaths, searchKey);
}
}
}
};
exports.recursiveFileScan = recursiveFileScan;
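// Usage sketch: collect every MySQL setup script under a module tree
// ('/path/to/module' is a placeholder; results accumulate in the array you pass in).
// var matches = [];
// recursiveFileScan('/path/to/module', matches, 'setup_mysql.sql');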
/**
* @function findSystems
*
* Searches the appDev framework tree to find modules, themes, and widgets.
*
* Asynchronous.
*
* @param {Function} callback
* callback(`allsystems`, `modules`, `themes`, `widgets`)
 * where each of the four parameters is an array of objects.
*/
var findSystems = function(callback)
{
var db = AD.Model.Datastore.DB;
var dbName = AD.Defaults.dbName;
var results = {
module: [],
theme: [],
widget: []
}
var find = function(type, next) {
// look for all subdirectories
var systemDir = fs.readdirSync(getPaths()[type]);
for (var i=0; i<systemDir.length; i++) {
var systemName = systemDir[i];
var systemPath = getPaths()[type]+'/'+systemName;
// skip .xxxx entries
if (systemName.indexOf('.') === 0) {
continue;
}
// skip non-directories
var stat = fs.statSync(systemPath);
if (!stat.isDirectory()) {
continue;
}
results[type].push({
'type': type,
'name': systemName,
'path': systemPath,
'installed': 0
});
}
// check if it is already installed
var sql = "SELECT * FROM " + dbName + ".`site_system` WHERE `system_type` = ?";
db.query(sql, [type], function(err, values, fields) {
if (err) {
// If we are doing an initial install, we'll probably get
// an error here.
console.error( err );
next();
return;
}
for (var i=0; i<values.length; i++) {
var systemName = values[i]['system_name'];
// Compare each DB result with what we found in the directory
for (var j=0; j<results[type].length; j++) {
// Mark this one as installed.
if (results[type][j]['name'] == systemName) {
results[type][j]['installed'] = 1;
}
}
}
next();
});
}
// call find() for each system type
async.forEach(['module', 'theme', 'widget'], find, function() {
// create the combined result
var combinedResults = results['module'].concat(results['theme'], results['widget']);
// return the results in a callback
callback(
combinedResults,
results['module'],
results['theme'],
results['widget']
);
});
}
exports.findSystems = findSystems;
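// Usage sketch (hypothetical caller; parameter names follow the callback docs above):
// findSystems(function(allSystems, modules, themes, widgets) {
//     modules.forEach(function(mod) {
//         console.log(mod.name + (mod.installed ? ' [installed]' : ' [not installed]'));
//     });
// });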
/**
* @function installSQL
*
* Looks for any "setup_xxxx.sql" files in a module's directory and installs
* them.
*
* Asynchronous.
*
* @param {String} basePath
* @param {Function} callback
*/
var installSQL = function(basePath, callback)
{
var db = require(__appdevPath+'/install/db/db_'+AD.Defaults.dataStoreMethod+'.js');
var sqlPaths = [];
var fileName = 'setup_' + AD.Defaults.dataStoreMethod+'.sql';
recursiveFileScan(basePath, sqlPaths, fileName);
if (sqlPaths.length == 0) {
callback();
return;
}
else {
console.log('');
console.log('Importing these SQL files:');
console.log(sqlPaths);
var dummyReq = {};
var dummyRes = {
send: function() {}
};
db.importSQL(dummyReq, dummyRes, callback, sqlPaths, AD.Defaults.dbName);
}
}
exports.installSQL = installSQL;
/**
* @function installLabels
*
* Looks for any .po files within a module's directory and installs them
* to the database.
*
* Asynchronous.
*
* @param {String} basePath
* @param {Function} callback
* @param {Boolean} [skipExisting]
* Optional. Specify TRUE to prevent existing labels from being updated.
*/
var installLabels = function(basePath, callback, skipExisting)
{
var db = require(__appdevPath+'/install/db/db_'+AD.Defaults.dataStoreMethod+'.js');
var poPaths = [];
var langList = [];
var poContent = '';
console.log('Installing labels within [' + basePath + ']');
// Define several asynchronous functions which we will later execute all
// in sequence.
var scanPoFiles = function(callback) {
// scan all the subdirectories for possible .po files
recursiveFileScan(basePath, poPaths, '.po');
callback();
}
var getSiteLanguages = function(callback) {
var db = AD.Model.Datastore.DB;
var sql = "SELECT language_code FROM " + AD.Defaults.dbName + ".site_multilingual_language";
db.query(sql, [], function(err, values, fields) {
if (err) console.error(err);
for (var i=0; i<values.length; i++) {
langList.push(values[i]['language_code']);
}
callback();
});
}
var readPoFiles = function(callback) {
async.forEachSeries(poPaths, function(path, innerCallback) {
for (var i=0; i<langList.length; i++) {
// Only read .po files that follow the right naming convention
if (path.indexOf('labels_' + langList[i] + '.po') !== -1) { | else {
console.log('installLabel [' + path + ']');
poContent += '\n\n' + data;
}
innerCallback();
});
// only one possible language match per .po file
return;
}
}
// no language matches for this .po file
innerCallback();
return;
}, callback);
}
//// Ian's label import algorithm
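// Each extracted block is expected to carry these fields (an illustrative entry
// inferred from the regexes below; real paths, keys, and strings will differ):
//
// path: /page/site/toolbar
// code: en
// key: [toolbar.logout]
// msgstr "Logout"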
var importPoContent = function(callback) {
var allcontentssplit = poContent.split(/\r?\n\s*\r?\n/);
var alllabels = [];
for (var i=0; i<allcontentssplit.length; i++)
{
var newstr = allcontentssplit[i].trim();
if (newstr != '') {
var iscomment = false;
var thepath = newstr.match(/path\: .*/) == null ? iscomment = true : newstr.match(/path\: .*/)[0].replace('path: ', '').trim() ;
var thecode = newstr.match(/code\: .*/) == null ? iscomment = true : newstr.match(/code\: .*/)[0].replace('code: ', '').trim() ;
var thekey = newstr.match(/key\: .*/) == null ? iscomment = true : newstr.match(/key\: .*/)[0].replace('key: ', '').trim() ;
var thestr = newstr.match(/(?:msgstr ")(.*)(?:"$)/) == null ? iscomment = true : newstr.match(/(?:msgstr ")(.*)(?:"$)/)[1].trim() ;
if (!iscomment)
{
// Add/Update the label
db.dbLabelUpdate(thepath, thecode, thekey, thestr, skipExisting);
alllabels.push({
'path': thepath,
'lang': thecode,
'key': thekey,
'text': thestr
});
}
}
}
// Populate any missing language labels with placeholders
db.dbLabelExists(alllabels, callback);
}
// Execute the installLabels() stack
async.series([
scanPoFiles,
getSiteLanguages,
readPoFiles,
importPoContent
], callback);
}
exports.installLabels = installLabels;
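// Usage sketch (hypothetical module path; the trailing `true` preserves labels
// that already exist in the database):
// installLabels(__appdevPath + '/modules/exampleModule', function() {
//     console.log('labels imported');
// }, true);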
/**
* @function initSystem
*
* Runs the init function stack(s) of a module/theme/widget, to perform any
* final installation steps. Then records it into the `site_system` DB table.
*
* In each module/theme/widget directory tree, an `initModule.js` script can be
* placed. This script should export an `initStack` array of functions. These
* functions will be called in sequence.
*
* Assumes a normal appDev runtime | fs.readFile(path, 'utf8', function(err, data) {
if (err) {
console.error(err);
} | random_line_split |
installerTools.js | the given
// searchKey. Start in the directory given in path and
// search each sub directory.
// All matches are added to the array listPaths.
var listFiles = fs.readdirSync(path);
for (var indx =0; indx < listFiles.length; indx++) {
var fileName = listFiles[indx];
// don't include .xxx files
if (fileName.indexOf(".") == 0) {
continue;
}
var filePath = path + '/' + fileName;
// don't repeat files already added
if (listPaths.indexOf(filePath) > -1) {
continue;
}
// if this file matches the search pattern,
if (fileName.indexOf(searchKey) > -1) {
// add to listPaths
listPaths.push(filePath );
} else {
// if this is a directory
var stats = fs.statSync(filePath);
if (stats.isDirectory()) {
// scan this directory too
recursiveFileScan( filePath, listPaths, searchKey);
}
}
}
};
exports.recursiveFileScan = recursiveFileScan;
/**
* @function findSystems
*
* Searches the appDev framework tree to find modules, themes, and widgets.
*
* Asynchronous.
*
* @param {Function} callback
* callback(`allsystems`, `modules`, `themes`, `widgets`)
 * where each of the four parameters is an array of objects.
*/
var findSystems = function(callback)
{
var db = AD.Model.Datastore.DB;
var dbName = AD.Defaults.dbName;
var results = {
module: [],
theme: [],
widget: []
}
var find = function(type, next) {
// look for all subdirectories
var systemDir = fs.readdirSync(getPaths()[type]);
for (var i=0; i<systemDir.length; i++) {
var systemName = systemDir[i];
var systemPath = getPaths()[type]+'/'+systemName;
// skip .xxxx entries
if (systemName.indexOf('.') === 0) {
continue;
}
// skip non-directories
var stat = fs.statSync(systemPath);
if (!stat.isDirectory()) {
continue;
}
results[type].push({
'type': type,
'name': systemName,
'path': systemPath,
'installed': 0
});
}
// check if it is already installed
var sql = "SELECT * FROM " + dbName + ".`site_system` WHERE `system_type` = ?";
db.query(sql, [type], function(err, values, fields) {
if (err) {
// If we are doing an initial install, we'll probably get
// an error here.
console.error( err );
next();
return;
}
for (var i=0; i<values.length; i++) {
var systemName = values[i]['system_name'];
// Compare each DB result with what we found in the directory
for (var j=0; j<results[type].length; j++) {
// Mark this one as installed.
if (results[type][j]['name'] == systemName) {
results[type][j]['installed'] = 1;
}
}
}
next();
});
}
// call find() for each system type
async.forEach(['module', 'theme', 'widget'], find, function() {
// create the combined result
var combinedResults = results['module'].concat(results['theme'], results['widget']);
// return the results in a callback
callback(
combinedResults,
results['module'],
results['theme'],
results['widget']
);
});
}
exports.findSystems = findSystems;
/**
* @function installSQL
*
* Looks for any "setup_xxxx.sql" files in a module's directory and installs
* them.
*
* Asynchronous.
*
* @param {String} basePath
* @param {Function} callback
*/
var installSQL = function(basePath, callback)
{
var db = require(__appdevPath+'/install/db/db_'+AD.Defaults.dataStoreMethod+'.js');
var sqlPaths = [];
var fileName = 'setup_' + AD.Defaults.dataStoreMethod+'.sql';
recursiveFileScan(basePath, sqlPaths, fileName);
if (sqlPaths.length == 0) {
callback();
return;
}
else {
console.log('');
console.log('Importing these SQL files:');
console.log(sqlPaths);
var dummyReq = {};
var dummyRes = {
send: function() {}
};
db.importSQL(dummyReq, dummyRes, callback, sqlPaths, AD.Defaults.dbName);
}
}
exports.installSQL = installSQL;
/**
* @function installLabels
*
* Looks for any .po files within a module's directory and installs them
* to the database.
*
* Asynchronous.
*
* @param {String} basePath
* @param {Function} callback
* @param {Boolean} [skipExisting]
* Optional. Specify TRUE to prevent existing labels from being updated.
*/
var installLabels = function(basePath, callback, skipExisting)
{
var db = require(__appdevPath+'/install/db/db_'+AD.Defaults.dataStoreMethod+'.js');
var poPaths = [];
var langList = [];
var poContent = '';
console.log('Installing labels within [' + basePath + ']');
// Define several asynchronous functions which we will later execute all
// in sequence.
var scanPoFiles = function(callback) {
// scan all the subdirectories for possible .po files
recursiveFileScan(basePath, poPaths, '.po');
callback();
}
var getSiteLanguages = function(callback) {
var db = AD.Model.Datastore.DB;
var sql = "SELECT language_code FROM " + AD.Defaults.dbName + ".site_multilingual_language";
db.query(sql, [], function(err, values, fields) {
if (err) console.error(err);
for (var i=0; i<values.length; i++) {
langList.push(values[i]['language_code']);
}
callback();
});
}
var readPoFiles = function(callback) {
async.forEachSeries(poPaths, function(path, innerCallback) {
for (i=0; i<langList.length; i++) |
// no language matches for this .po file
innerCallback();
return;
}, callback);
}
//// Ian's label import algorithm
var importPoContent = function(callback) {
var allcontentssplit = poContent.split(/\r?\n\s*\r?\n/);
var alllabels = [];
for (var i=0; i<allcontentssplit.length; i++)
{
var newstr = allcontentssplit[i].trim();
if (newstr != '') {
var iscomment = false;
var thepath = newstr.match(/path\: .*/) == null ? iscomment = true : newstr.match(/path\: .*/)[0].replace('path: ', '').trim() ;
var thecode = newstr.match(/code\: .*/) == null ? iscomment = true : newstr.match(/code\: .*/)[0].replace('code: ', '').trim() ;
var thekey = newstr.match(/key\: .*/) == null ? iscomment = true : newstr.match(/key\: .*/)[0].replace('key: ', '').trim() ;
var thestr = newstr.match(/(?:msgstr ")(.*)(?:"$)/) == null ? iscomment = true : newstr.match(/(?:msgstr ")(.*)(?:"$)/)[1].trim() ;
if (!iscomment)
{
// Add/Update the label
db.dbLabelUpdate(thepath, thecode, thekey, thestr, skipExisting);
alllabels.push({
'path': thepath,
'lang': thecode,
'key': thekey,
'text': thestr
});
}
}
}
// Populate any missing language labels with placeholders
db.dbLabelExists(alllabels, callback);
}
// Execute the installLabels() stack
async.series([
scanPoFiles,
getSiteLanguages,
readPoFiles,
importPoContent
], callback);
}
exports.installLabels = installLabels;
/**
* @function initSystem
*
* Runs the init function stack(s) of a module/theme/widget, to perform any
* final installation steps. Then records it into the `site_system` DB table.
*
* In each module/theme/widget directory tree, an `initModule.js` script can be
* placed. This script should export an `initStack` array of functions. These
* functions will be called in sequence.
*
* Assumes a normal app | {
// Only read .po files that follow the right naming convention
if (path.indexOf('labels_' + langList[i] + '.po') !== -1) {
fs.readFile(path, 'utf8', function(err, data) {
if (err) {
console.error(err);
}
else {
console.log('installLabel [' + path + ']');
poContent += '\n\n' + data;
}
innerCallback();
});
// only one possible language match per .po file
return;
}
} | conditional_block |
main.js | (e){
e = e || window.event;
return e.target || e.srcElement; // Accommodate all browsers
}
/******************************
OBJECT-ORIENTED VARIABLES
******************************/
/***** Navigation Object Literal *****/
var nav = {
// Show active nav item link, using green bar,
// on main navigation menu
activeNav: function(link) {
$("#nav ul").find("li").each(function(){
$(this).find("span").removeClass("active");
});
link.find("span").addClass("active");
}
};
/***** Notification Object *****/
var notify = {
// Notification messages
messageList: [{
notification: "You have not verified your account.", note: "warning", message: "<h3>You have not verified your account</h3><p>A confimation request was sent to your email. Please confirm your account by clicking the link provided.</p> <p>If you have any questions, please contact us at <a href='http://www.w3schools.com/tags/tag_html.asp' target='_blank'>http://yourapp.com/acconts</a>.</p>"
},
{
notification: "Your ad has been approved and is ready for publication.", note: "marketing", message: "<h3>Congratulations</h3><p>Your ad has been approved. Visit <a href='http://www.w3schools.com/tags/tag_html.asp' target='_blank'>http://yourapp.com/ads</a> for more information.</p>"
},
{
notification: "Invite your friends to use YourApp™.", note: "marketing", message: "<h3>Invite Your<br />Friends Over</h3><p>Good friends don't let friends pass on the promotions and deals <strong>YourApp™</strong> offers. Visit <a href='http://www.w3schools.com/tags/tag_html.asp' target='_blank'>http://yourapp.com/invite</a> for instructions on how to generate invites from your Facebook or email contact lists.</p>"
}],
// Close the notification bar smoothly after
// clicking attached close button
closeNotify: function(divName) {
var counter = -1;
divName.parent().animate({
opacity: 0
},function(){
divName.parent().addClass("hide-div");
});
$notificationPlaceholder.children().each(function(){
if ( !$(this).hasClass("hide-div") ) {
counter++;
}
});
if ( counter < 1 ) {
$notification.find("span").removeClass("alert");
}
},
// Show accompanying pop-up message after
// clicking notification bar
openMessage: function(divName) {
divName.parent().find(".alert-message").addClass("show-message");
divName.parent().find(".alert-message").animate({
opacity: 1
});
},
// Close accompanying pop-up message after
// clicking alert message
closeMessage: function(divName) {
divName.parent().find(".alert-message").animate({
opacity: 0,
left: 0
}, function(){
$(this).parent().find(".alert-message").removeClass("show-message");
});
},
// Special message: Browser compatibility and
// teacher's notes
openMessageTest: function(message) {
$innerNote.text(message);
$note.addClass("show-message");
$note.animate({
opacity: 1
});
var messageTimer = setInterval(function(){
clearInterval(messageTimer);
$note.animate({
opacity: 0
}, function(){
$note.removeClass("show-message");
});
}, 4000);
},
// Open all messages after selecting notification icon
openAll: function() {
var counter1 = 0;
var counter2 = 0;
$notificationPlaceholder.children().find(".alert-message").each(function(){
// display and animate
$(this).addClass("show-message").animate({
opacity: 1,
left: counter1 * 30
});
counter1++;
});
$notificationPlaceholder.children().each(function(){
if ( !$(this).hasClass("hide-div") ) {
counter2++;
}
});
if ( counter2 < 1 ) {
var message = "Tester: You closed all the ";
message += "notifications. Refresh the page ";
message += "and click the icon again.";
notify.openMessageTest(message);
}
}
};
/***** Traffic Line Chart Object Literal *****/
var lineTraffic = {
// Hourly Data
trafficHour: function() {
var hours = {
labels: ["8 AM", "9 AM", "10 AM", "11 AM", "12 PM", "1PM", "2 PM", "3 PM", "4 PM", "5 PM", "6 PM", "7 PM", "8 PM"],
datasets: [
{
label: "Hourly",
fillColor: "rgba(255, 105, 105, 0.2)",
strokeColor: "rgba(255, 105, 105, 1)",
pointColor: "rgba(255, 105, 105, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(255, 105, 105, 1)",
data: [31, 42, 25, 52, 89, 101, 66, 105, 63, 31, 25, 24, 20]
}
]
};
lineTraffic.drawChart(hours);
},
// Daily Data
trafficDay: function() {
var days = {
labels: ["Sun", "Mon", "Tues", "Wed", "Thur", "Fri", "Sat"],
datasets: [
{
label: "Daily",
fillColor: "rgba(170,153, 57, 0.1)",
strokeColor: "rgba(170,153, 57, 1)",
pointColor: "rgba(170,153, 57, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(170,153, 57, 1)",
data: [305, 425, 633, 581, 233, 455, 365]
}
]
};
lineTraffic.drawChart(days);
},
// Weekly Data
trafficWeek: function() {
var week = {
labels: ["(This Week)", "Week 2", "Week 3", "Week 4", "Week 5"],
datasets: [
{
label: "Daily",
fillColor: "rgba(136, 204, 136, 0.2)",
strokeColor: "rgba(136, 204, 136, 1)",
pointColor: "rgba(136, 204, 136, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(136, 204, 136, 1)",
data: [1203, 1355, 902, 878, 1026]
}
]
};
lineTraffic.drawChart(week);
},
// Monthly Data
trafficMonth: function() {
var months = {
labels: ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"],
datasets: [
{
label: "Monthly",
fillColor: "rgba(151, 187, 205, 0.2)",
strokeColor: "rgba(151, 187, 205, 1)",
pointColor: "rgba(151, 187, 205, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(151, 187, 205 ,1)",
data: [10233, 12682, 18523, 14629, 18923, 16234, 11231, 17234, 997 | targetChoice | identifier_name |
|
main.js | ", profile:"john-2134", join:"May 28, 2013", email:"[email protected]", recentActivity:"posted Facebook's Changes for 2016.", recentTime:"3 hours ago", activity: "posted"
},
{
id:9009, first: "Crystal", last:"Meyers", profile:"crystal-9009", join:"Aug 23, 2016", email:"[email protected]", recentActivity:"commented on YourApp's SEO Tips.", recentTime:"4 hours ago", activity: "commented"
},
{
id:9101, first: "Jackie", last:"Sun", profile:"jackie-9101", join:"Aug 25, 2016", email:"[email protected]", recentActivity:"just joined YourApp™ a few hours ago.", recentTime:"5 hours ago", activity: "joined"
},
{
id:9153, first: "Jill", last:"Scott", profile:"jill-9153", join:"Aug 25, 2016", email:"[email protected]", recentActivity:"commented on YourApp's SEO Tips.", recentTime:"5 hours ago", activity: "commented"
},
{
id:9254, first: "Manuel", last:"Ortiz", profile:"manuel-9254", join:"Aug 25, 2016", email:"[email protected]", recentActivity:"posted YourApp's SEO Tips.", recentTime:"1 day ago", activity: "posted"
}
],
newMembers: function() {
// variables
var newMemberList = [];
// loop through all members
for ( var index = members.memberData.length - 1; index > 0; index-- ) {
// I expect to search database w/ PHP (or equivalent) to find
// most recent and last index numbers.
// 8000 and 10,000 will be given parameters
for ( var i = 8000; i < 10000; i++) {
var name = "";
var profile = "";
var email = "";
var join = "";
if( members.memberData[index].id === i ) {
name = members.memberData[index].first + " ";
name += members.memberData[index].last;
profile = members.memberData[index].profile;
email = members.memberData[index].email;
join = members.memberData[index].join;
newMemberList.push({name: name, profile: profile, email: email, join: join});
}
}
}
return newMemberList;
},
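// Illustrative trace against the sample data above (not live output):
// members.buildMemberArray("ja") returns ["jackie sun"], since matching
// is done on the lower-cased "first last" name.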
buildMemberArray: function(value) {
// Create an array of member choices
var searched = [];
var given = value.toLowerCase();
for ( var i = 0; i < members.memberData.length; i++ ) {
var member = members.memberData[i].first + " " + members.memberData[i].last;
member = member.toLowerCase();
if ( member.indexOf(given) !== -1 ) {
if (given !== "" ) {
searched.push(member);
}
}
}
return searched;
},
searchForm: function(value) {
// Create an array of member choices
var searched = [];
var given = value.toLowerCase();
for ( var i = 0; i < members.memberData.length; i++ ) {
var memberItem = members.memberData[i].first + " " + members.memberData[i].last;
memberItem = memberItem.toLowerCase();
if ( memberItem.indexOf(given) !== -1 ) {
if (given !== "" ) {
searched.push(memberItem);
}
}
}
//Remove list
$("#list").remove();
/*var newList = document.createElement("ul");
newList.setAttribute("id", "list");*/
$("#list-aid").append('<ul id="list" class="hide-div"></ul>');
var sel = document.getElementById("list");
// Propagate #list li
for ( var i = 0; i < searched.length; i++ ) {
var li = document.createElement("li");
li.innerHTML = searched[i];
sel.appendChild(li);
}
// Hide list if no Choices
if ( searched.length > 0 ) {
$("#list").removeClass("hide-div");
} else {
$("#list").addClass("hide-div");
}
},
// Add first result to search field
// on [TAB]
updateSearchField: function(li, e) {
$searchMember.val(li);
$list.addClass("hide-div");
},
// Notifies user of validation error
validateThis: function(fieldName, message) {
fieldName.html(strip(message));
fieldName.addClass("show-validate");
},
// Fades out validation message
fadeMessage: function(parent, helper) {
var timedMessage = setInterval(function(){
clearInterval(timedMessage);
parent.find(helper).each(function(){
$(this).removeClass("show-validate");
});
}, 1500);
},
// Clears fields of Message User form
clearFields: function() {
$sendMessage.find(".clear").each(function(){
$(this).val("");
});
},
// Validates Message User form
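// Two-pass validation: check 1 rejects empty fields, check 2 verifies the
// typed name against the member list; only then does the success path run.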
validateForm: function() {
// variables
var searchMemberVal = $searchMember.val().trim();
var messageMemberVal = $messageMember.val().trim();
var test = 0;
var parent =$("#send-message");
var $helperField;
var message;
var help;
// check #search-member for val
if ( searchMemberVal === "" || searchMemberVal === null ) {
$helperField = $("#help-member");
message = "Check 1: Please type member name";
members.validateThis($helperField, message);
} else {
test++;
}
// check #message-member for val
if ( messageMemberVal === "" || messageMemberVal === null ) {
$helperField = $("#help-write");
message = "Check 1: Please write something";
members.validateThis($helperField, message);
} else {
test++;
}
// check 1: test for blank fields, etc.
if ( test < 2 ) {
help = $(".help");
members.fadeMessage(parent, help);
return;
}
// check 2: check #message-member field against member list
var foundMember = members.buildMemberArray($searchMember.val());
if ( foundMember.length < 1 ) {
$helperField = $("#help-member");
help = $(".help");
message = "Check 2: There is no member by that name";
members.validateThis($helperField, message);
members.fadeMessage(parent, help);
return;
}
// send message via PHP or equivalent
// [----CODE---]
// relay timed success message
$helperField = $("#help-submit");
message = "SUCCESS! Message sent";
help = $(".success-help");
members.validateThis($helperField, message);
members.clearFields();
members.fadeMessage(parent, help);
}
};
/***** Settings Object Literal *****/
var appSettings = {
// Save settings on localStorage
saveSettings: function() {
// variables
var saveEmail = $emailNotification.prop("checked");
var savePublic = $publicProfile.prop("checked");
var $helperField = $("#help-save");
var message = "SUCCESS! Saved";
var parent = $("#dashboard-settings");
var help = $(".success-help");
// save email notification option
localStorage.setItem("emailSetting", saveEmail);
// save profile option
localStorage.setItem("publicSetting", savePublic);
// save timezone
for ( var i = 0; i < $timezoneOption.length; i++) {
if ( $timezoneOption[i].selected === true ){
var saveTimezone = i;
localStorage.setItem("timezoneSetting", saveTimezone);
}
}
// relay timed success message
members.validateThis($helperField, message);
members.fadeMessage(parent, help);
},
// Retrieve settings from local storage
retrieveSettings: function() {
//retrieve email notification choice
var getEmail = localStorage.getItem("emailSetting");
var getPublic = localStorage.getItem("publicSetting");
var getTimezone = localStorage.getItem("timezoneSetting");
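// Note: localStorage stores values as strings, so the booleans saved above
// come back as "true"/"false" and are compared as strings below.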
// apply the saved email notification choice
if ( getEmail !== null ) { // getItem returns null when the key is missing
if ( getEmail !== "true" ) {
$emailNotification.switchButton({
checked: false
});
} else | {
$emailNotification.switchButton({
checked: true
});
} | conditional_block |
|
main.js |
// Get enclosing element on an event (e.g. "click")
function targetChoice(e){
e = e || window.event;
return e.target || e.srcElement; // Accommodate all browsers
}
/******************************
OBJECT-ORIENTED VARIABLES
******************************/
/***** Navigation Object Literal *****/
var nav = {
// Show active nav item link, using green bar,
// on main navigation menu
activeNav: function(link) {
$("#nav ul").find("li").each(function(){
$(this).find("span").removeClass("active");
});
link.find("span").addClass("active");
}
};
/***** Notification Object *****/
var notify = {
// Notification messages
messageList: [{
notification: "You have not verified your account.", note: "warning", message: "<h3>You have not verified your account</h3><p>A confimation request was sent to your email. Please confirm your account by clicking the link provided.</p> <p>If you have any questions, please contact us at <a href='http://www.w3schools.com/tags/tag_html.asp' target='_blank'>http://yourapp.com/acconts</a>.</p>"
},
{
notification: "Your ad has been approved and is ready for publication.", note: "marketing", message: "<h3>Congratulations</h3><p>Your ad has been approved. Visit <a href='http://www.w3schools.com/tags/tag_html.asp' target='_blank'>http://yourapp.com/ads</a> for more information.</p>"
},
{
notification: "Invite your friends to use YourApp™.", note: "marketing", message: "<h3>Invite Your<br />Friends Over</h3><p>Good friends don't let friends pass on the promotions and deals <strong>YourApp™</strong> offers. Visit <a href='http://www.w3schools.com/tags/tag_html.asp' target='_blank'>http://yourapp.com/invite</a> for instructions on how to generate invites from your Facebook or email contact lists.</p>"
}],
// Close the notification bar smoothly after
// clicking attached close button
closeNotify: function(divName) {
var counter = -1;
divName.parent().animate({
opacity: 0
},function(){
divName.parent().addClass("hide-div");
});
$notificationPlaceholder.children().each(function(){
if ( !$(this).hasClass("hide-div") ) {
counter++;
}
});
if ( counter < 1 ) {
$notification.find("span").removeClass("alert");
}
},
// Show accompanying pop-up message after
// clicking notification bar
openMessage: function(divName) {
divName.parent().find(".alert-message").addClass("show-message");
divName.parent().find(".alert-message").animate({
opacity: 1
});
},
// Close accompanying pop-up message after
// clicking alert message
closeMessage: function(divName) {
divName.parent().find(".alert-message").animate({
opacity: 0,
left: 0
}, function(){
$(this).parent().find(".alert-message").removeClass("show-message");
});
},
// Special message: Browser compatibility and
// teacher's notes
openMessageTest: function(message) {
$innerNote.text(message);
$note.addClass("show-message");
$note.animate({
opacity: 1
});
var messageTimer = setInterval(function(){
clearInterval(messageTimer);
$note.animate({
opacity: 0
}, function(){
$note.removeClass("show-message");
});
}, 4000);
},
// Open all messages after selecting notification icon
openAll: function() {
var counter1 = 0;
var counter2 = 0;
$notificationPlaceholder.children().find(".alert-message").each(function(){
// display and animate
$(this).addClass("show-message").animate({
opacity: 1,
left: counter1 * 30
});
counter1++;
});
$notificationPlaceholder.children().each(function(){
if ( !$(this).hasClass("hide-div") ) {
counter2++;
}
});
if ( counter2 < 1 ) {
var message = "Tester: You closed all the ";
message += "notifications. Refresh the page ";
message += "and click the icon again.";
notify.openMessageTest(message);
}
}
};
/***** Traffic Line Chart Object Literal *****/
var lineTraffic = {
// Hourly Data
trafficHour: function() {
var hours = {
labels: ["8 AM", "9 AM", "10 AM", "11 AM", "12 PM", "1PM", "2 PM", "3 PM", "4 PM", "5 PM", "6 PM", "7 PM", "8 PM"],
datasets: [
{
label: "Hourly",
fillColor: "rgba(255, 105, 105, 0.2)",
strokeColor: "rgba(255, 105, 105, 1)",
pointColor: "rgba(255, 105, 105, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(255, 105, 105, 1)",
data: [31, 42, 25, 52, 89, 101, 66, 105, 63, 31, 25, 24, 20]
}
]
};
lineTraffic.drawChart(hours);
},
// Daily Data
trafficDay: function() {
var days = {
labels: ["Sun", "Mon", "Tues", "Wed", "Thur", "Fri", "Sat"],
datasets: [
{
label: "Daily",
fillColor: "rgba(170,153, 57, 0.1)",
strokeColor: "rgba(170,153, 57, 1)",
pointColor: "rgba(170,153, 57, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(170,153, 57, 1)",
data: [305, 425, 633, 581, 233, 455, 365]
}
]
};
lineTraffic.drawChart(days);
},
// Weekly Data
trafficWeek: function() {
var week = {
labels: ["(This Week)", "Week 2", "Week 3", "Week 4", "Week 5"],
datasets: [
{
label: "Daily",
fillColor: "rgba(136, 204, 136, 0.2)",
strokeColor: "rgba(136, 204, 136, 1)",
pointColor: "rgba(136, 204, 136, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(136, 204, 136, 1)",
data: [1203, 1355, 902, 878, 1026]
}
]
};
lineTraffic.drawChart(week);
},
// Monthly Data
trafficMonth: function() {
var months = {
labels: ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"],
datasets: [
{
label: "Monthly",
fillColor: "rgba(151, 187, 205, 0.2)",
strokeColor: "rgba(151, 187, 205, 1)",
pointColor: "rgba(151, 187, 205, 1)",
pointStrokeColor: "#fff",
pointHighlightFill: "#fff",
pointHighlightStroke: "rgba(151, 187, 205 ,1)",
data: [102 | {
var regex = /(<([^]+)>\n)/ig;
var cleanIt = message.replace(regex, "");
var results = cleanIt.trim();
return results;
} | identifier_body |
|
main.js | lineTraffic.trafficWeek();
break;
case "months":
lineTraffic.trafficMonth();
break;
}
}
};
/***** Daily Traffic Bar Chart Object Literal *****/
var barDailyTraffic = {
// Daily Traffic data
barDay: function() {
var days = {
labels: ["Sun", "Mon", "Tues", "Wed", "Thur", "Fri", "Sat"],
datasets: [
{
label: "Unique Visits",
data: [125, 232, 411, 342, 55, 211, 118],
fillColor: "rgba(170,153, 57, 0.5)",
strokeColor: "rgba(170,153, 57, 1)"
},
{
label: "Return Visits",
data: [255, 391, 522, 442, 200, 355, 234],
fillColor: "rgba(151, 187, 205, 0.5)",
strokeColor: "rgba(151, 187, 205, 1)"
}
]
};
barDailyTraffic.drawBarChart(days);
},
// Draw Chart
drawBarChart: function(data) {
var canvas = document.querySelector("#daily-chart");
var ctx = canvas.getContext("2d");
var barChart = new Chart(ctx).Bar(data, {
responsive: true
});
document.getElementById('daily-chart-legend').innerHTML = barChart.generateLegend();
}
};
/***** Mobile Users Doughnut Chart Object Literal *****/
var mobileUsers = {
//Mobile User data
mobile: function() {
var users = [
{
label: "IOS",
value: 43,
color: "rgba(151, 187, 205, 0.5)"
},
{
label: "Android",
value: 35,
color: "rgba(170,153, 57, 0.5)"
},
{
label: "Windows",
value: 15,
color: "rgba(136, 204, 136, 0.5)"
},
{
label: "Other",
value: 7,
color: "rgba(255, 105, 105, 0.5)"
}
];
mobileUsers.drawDoughnutChart(users);
},
// Draw Chart
drawDoughnutChart: function(data) {
var canvas = document.querySelector("#mobile-chart");
var ctx = canvas.getContext("2d");
var doughnutChart = new Chart(ctx).Doughnut(data, {
responsive: true,
segmentShowStroke: false,
tooltipTemplate: "<%= value %>%"
});
document.getElementById('mobile-legend').innerHTML = doughnutChart.generateLegend();
}
};
/***** Social Stat Object Literal *****/
var social = {
media: [
{
socialMedia: "Facebook", value: "10,2015", socialId: "facebook-svg"
},
{
socialMedia: "Twitter", value: "6,525", socialId: "twitter-svg"
},
{
socialMedia: "Google+", value: "3,834", socialId: "googleplus-svg"
},
{
socialMedia: "LinkedIn", value: "4,232", socialId: "linkedin-svg"
},
{
socialMedia: "Instagram", value: "8,900", socialId: "instagram-svg"
}
]
};
/***** Members Chart Object Literal *****/
var members = {
memberData: [
{
id:1123, first: "Sharon", last:"Lee", profile:"sharon-1123", join:"Aug 8, 2012", email:"[email protected]", recentActivity:"commented on Facebook's Changes for 2016.", recentTime:"2 hours ago", activity: "commented"
},
{
id:2134, first: "John", last:"Warner", profile:"john-2134", join:"May 28, 2013", email:"[email protected]", recentActivity:"posted Facebook's Changes for 2016.", recentTime:"3 hours ago", activity: "posted"
},
{
id:9009, first: "Crystal", last:"Meyers", profile:"crystal-9009", join:"Aug 23, 2016", email:"[email protected]", recentActivity:"commented on YourApp's SEO Tips.", recentTime:"4 hours ago", activity: "commented"
},
{
id:9101, first: "Jackie", last:"Sun", profile:"jackie-9101", join:"Aug 25, 2016", email:"[email protected]", recentActivity:"just joined YourApp™ a few hours ago.", recentTime:"5 hours ago", activity: "joined"
},
{
id:9153, first: "Jill", last:"Scott", profile:"jill-9153", join:"Aug 25, 2016", email:"[email protected]", recentActivity:"commented on YourApp's SEO Tips.", recentTime:"5 hours ago", activity: "commented"
},
{
id:9254, first: "Manuel", last:"Ortiz", profile:"manuel-9254", join:"Aug 25, 2016", email:"[email protected]", recentActivity:"posted YourApp's SEO Tips.", recentTime:"1 day ago", activity: "posted"
}
],
newMembers: function() {
// variables
var newMemberList = [];
// loop through all members
for ( var index = members.memberData.length - 1; index >= 0; index-- ) {
// A production version would search the database with PHP (or equivalent)
// to find the most recent and oldest member id numbers;
// 8000 and 10000 are stand-ins for those given parameters.
for ( var i = 8000; i < 10000; i++) {
var name = "";
var profile = "";
var email = "";
var join = "";
if( members.memberData[index].id === i ) {
name = members.memberData[index].first + " ";
name += members.memberData[index].last;
profile = members.memberData[index].profile;
email = members.memberData[index].email;
join = members.memberData[index].join;
newMemberList.push({name: name, profile: profile, email: email, join: join});
}
}
}
return newMemberList;
},
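// A filter-based alternative to the nested id probe above, sketched as a
// comment (hypothetical property name newMembersFiltered; same assumed
// 8000-10000 id window passed in as parameters):
//
// newMembersFiltered: function(minId, maxId) {
//     return members.memberData
//         .filter(function(m) { return m.id >= minId && m.id < maxId; })
//         .map(function(m) {
//             return { name: m.first + " " + m.last, profile: m.profile,
//                      email: m.email, join: m.join };
//         });
// },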
buildMemberArray: function(value) {
// Create an array of member choices
var searched = [];
var given = value.toLowerCase();
for ( var i = 0; i < members.memberData.length; i++ ) {
var member = members.memberData[i].first + " " + members.memberData[i].last;
member = member.toLowerCase();
if ( member.indexOf(given) !== -1 ) {
if (given !== "" ) {
searched.push(member);
}
}
}
return searched;
},
searchForm: function(value) {
// Create an array of member choices
var searched = [];
var given = value.toLowerCase();
for ( var i = 0; i < members.memberData.length; i++ ) {
var memberItem = members.memberData[i].first + " " + members.memberData[i].last;
memberItem = memberItem.toLowerCase();
if ( memberItem.indexOf(given) !== -1 ) {
if (given !== "" ) {
searched.push(memberItem);
}
}
}
//Remove list
$("#list").remove();
$("#list-aid").append('<ul id="list" class="hide-div"></ul>');
var sel = document.getElementById("list");
// Populate #list with the matched names
for ( var i = 0; i < searched.length; i++ ) {
var li = document.createElement("li");
li.innerHTML = searched[i]; | sel.appendChild(li);
}
// Hide list if no Choices | random_line_split |
|
docker.go | .ParseNormalizedNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Argument 1 (ref): can't parse %q: %v", dockerRef, err)
}
if command == "" {
return nil, fmt.Errorf("Argument 2 (command) can't be empty")
}
if deps == nil || deps.Len() == 0 {
return nil, fmt.Errorf("Argument 3 (deps) can't be empty")
}
var localDeps []string
iter := deps.Iterate()
defer iter.Done()
var v starlark.Value
for iter.Next(&v) {
p, err := s.localPathFromSkylarkValue(v)
if err != nil {
return nil, fmt.Errorf("Argument 3 (deps): %v", err)
}
localDeps = append(localDeps, p.path)
}
img := &dockerImage{
ref: container.NewRefSelector(ref),
customCommand: command,
customDeps: localDeps,
}
err = s.buildIndex.addImage(img)
if err != nil {
return nil, err
}
return &customBuild{s: s, img: img}, nil
}
type customBuild struct {
s *tiltfileState
img *dockerImage
}
var _ starlark.Value = &customBuild{}
func (b *customBuild) String() string {
return fmt.Sprintf("custom_build(%q)", b.img.ref.String())
}
func (b *customBuild) Type() string {
return "custom_build"
}
func (b *customBuild) Freeze() {}
func (b *customBuild) Truth() starlark.Bool {
return true
}
func (b *customBuild) Hash() (uint32, error) {
return 0, fmt.Errorf("unhashable type: custom_build")
}
const (
addFastBuildN = "add_fast_build"
)
func (b *customBuild) Attr(name string) (starlark.Value, error) {
switch name {
case addFastBuildN:
return starlark.NewBuiltin(name, b.addFastBuild), nil
default:
return starlark.None, nil
}
}
func (b *customBuild) AttrNames() []string {
return []string{addFastBuildN}
}
func (b *customBuild) addFastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
return &fastBuild{s: b.s, img: b.img}, nil
}
func (s *tiltfileState) fastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var dockerRef, entrypoint string
var baseDockerfile starlark.Value
var cacheVal starlark.Value
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"ref", &dockerRef,
"base_dockerfile", &baseDockerfile,
"entrypoint?", &entrypoint,
"cache?", &cacheVal,
)
if err != nil {
return nil, err
}
baseDockerfilePath, err := s.localPathFromSkylarkValue(baseDockerfile)
if err != nil {
return nil, fmt.Errorf("Argument 2 (base_dockerfile): %v", err)
}
ref, err := container.ParseNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Parsing %q: %v", dockerRef, err)
}
bs, err := s.readFile(baseDockerfilePath)
if err != nil {
return nil, errors.Wrap(err, "error reading dockerfile")
}
df := dockerfile.Dockerfile(bs)
if err = df.ValidateBaseDockerfile(); err != nil {
return nil, err
}
cachePaths, err := s.cachePathsFromSkylarkValue(cacheVal)
if err != nil {
return nil, err
}
r := &dockerImage{
baseDockerfilePath: baseDockerfilePath,
baseDockerfile: df,
ref: container.NewRefSelector(ref),
entrypoint: entrypoint,
cachePaths: cachePaths,
}
err = s.buildIndex.addImage(r)
if err != nil {
return nil, err
}
fb := &fastBuild{s: s, img: r}
return fb, nil
}
func (s *tiltfileState) cachePathsFromSkylarkValue(val starlark.Value) ([]string, error) {
if val == nil {
return nil, nil
}
cachePaths := starlarkValueOrSequenceToSlice(val)
var ret []string
for _, v := range cachePaths {
str, ok := v.(starlark.String)
if !ok {
return nil, fmt.Errorf("cache param %v is a %T; must be a string", v, v)
}
ret = append(ret, string(str))
}
return ret, nil
}
type fastBuild struct {
s *tiltfileState
img *dockerImage
}
var _ starlark.Value = &fastBuild{}
func (b *fastBuild) String() string {
return fmt.Sprintf("fast_build(%q)", b.img.ref.String())
}
func (b *fastBuild) Type() string {
return "fast_build"
}
func (b *fastBuild) Freeze() {}
func (b *fastBuild) Truth() starlark.Bool {
return true
}
func (b *fastBuild) Hash() (uint32, error) {
return 0, fmt.Errorf("unhashable type: fast_build")
}
const (
addN = "add"
runN = "run"
hotReloadN = "hot_reload"
)
func (b *fastBuild) Attr(name string) (starlark.Value, error) {
switch name {
case addN:
return starlark.NewBuiltin(name, b.add), nil
case runN:
return starlark.NewBuiltin(name, b.run), nil
case hotReloadN:
return starlark.NewBuiltin(name, b.hotReload), nil
default:
return starlark.None, nil
}
}
func (b *fastBuild) AttrNames() []string {
return []string{addN, runN, hotReloadN}
}
func (b *fastBuild) hotReload(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if err := starlark.UnpackArgs(fn.Name(), args, kwargs); err != nil {
return nil, err
}
b.img.hotReload = true
return b, nil
}
func (b *fastBuild) add(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(b.img.steps) > 0 {
return nil, fmt.Errorf("fast_build(%q).add() called after .run(); must add all code before runs", b.img.ref.String())
}
var src starlark.Value
var mountPoint string
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "src", &src, "dest", &mountPoint); err != nil {
return nil, err
}
m := mount{}
lp, err := b.s.localPathFromSkylarkValue(src)
if err != nil {
return nil, errors.Wrapf(err, "%s.%s(): invalid type for src (arg 1)", b.String(), fn.Name())
}
m.src = lp
m.mountPoint = mountPoint
b.img.mounts = append(b.img.mounts, m)
return b, nil
}
func (b *fastBuild) run(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var cmd string
var trigger starlark.Value
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "cmd", &cmd, "trigger?", &trigger); err != nil {
return nil, err
}
var triggers []string
switch trigger := trigger.(type) {
case *starlark.List:
l := trigger.Len()
triggers = make([]string, l)
for i := 0; i < l; i++ {
t := trigger.Index(i)
tStr, isStr := t.(starlark.String)
if !isStr {
return nil, badTypeErr(fn, starlark.String(""), t)
}
triggers[i] = string(tStr)
}
case starlark.String:
triggers = []string{string(trigger)}
}
step := model.ToStep(b.s.absWorkingDir(), model.ToShellCmd(cmd))
step.Triggers = triggers
b.img.steps = append(b.img.steps, step)
return b, nil
}
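// For reference, a minimal Tiltfile sketch chaining the builtins above
// (illustrative image ref and paths only):
//
//   fb = fast_build('gcr.io/some-project/app', 'Dockerfile.base')
//   fb.add('./src', '/app/src')
//   fb.run('go build ./...', trigger=['src/main.go'])
//   fb.hot_reload()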
type mount struct {
src localPath
mountPoint string
}
func (s *tiltfileState) mountsToDomain(image *dockerImage) []model.Mount {
var result []model.Mount
for _, m := range image.mounts | {
result = append(result, model.Mount{LocalPath: m.src.path, ContainerPath: m.mountPoint})
} | conditional_block |
|
docker.go |
type dockerImageBuildType int
const (
UnknownBuild = iota
StaticBuild
FastBuild
CustomBuild
)
func (d *dockerImage) Type() dockerImageBuildType {
if !d.staticBuildPath.Empty() {
return StaticBuild
}
if !d.baseDockerfilePath.Empty() {
return FastBuild
}
if d.customCommand != "" {
return CustomBuild
}
return UnknownBuild
}
func (s *tiltfileState) dockerBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var dockerRef string
var contextVal, dockerfilePathVal, buildArgs, cacheVal, dockerfileContentsVal starlark.Value
if err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"ref", &dockerRef,
"context", &contextVal,
"build_args?", &buildArgs,
"dockerfile?", &dockerfilePathVal,
"dockerfile_contents?", &dockerfileContentsVal,
"cache?", &cacheVal,
); err != nil {
return nil, err
}
ref, err := container.ParseNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Argument 1 (ref): can't parse %q: %v", dockerRef, err)
}
if contextVal == nil {
return nil, fmt.Errorf("Argument 2 (context): empty but is required")
}
context, err := s.localPathFromSkylarkValue(contextVal)
if err != nil {
return nil, err
}
var sba map[string]string
if buildArgs != nil {
d, ok := buildArgs.(*starlark.Dict)
if !ok {
return nil, fmt.Errorf("Argument 3 (build_args): expected dict, got %T", buildArgs)
}
sba, err = skylarkStringDictToGoMap(d)
if err != nil {
return nil, fmt.Errorf("Argument 3 (build_args): %v", err)
}
}
dockerfilePath := context.join("Dockerfile")
var dockerfileContents string
if dockerfileContentsVal != nil && dockerfilePathVal != nil {
return nil, fmt.Errorf("Cannot specify both dockerfile and dockerfile_contents keyword arguments")
}
if dockerfileContentsVal != nil {
switch v := dockerfileContentsVal.(type) {
case *blob:
dockerfileContents = v.text
case starlark.String:
dockerfileContents = v.GoString()
default:
return nil, fmt.Errorf("Argument (dockerfile_contents): must be string or blob.")
}
} else if dockerfilePathVal != nil {
dockerfilePath, err = s.localPathFromSkylarkValue(dockerfilePathVal)
if err != nil {
return nil, err
}
bs, err := s.readFile(dockerfilePath)
if err != nil {
return nil, errors.Wrap(err, "error reading dockerfile")
}
dockerfileContents = string(bs)
} else {
bs, err := s.readFile(dockerfilePath)
if err != nil {
return nil, errors.Wrapf(err, "error reading dockerfile")
}
dockerfileContents = string(bs)
}
cachePaths, err := s.cachePathsFromSkylarkValue(cacheVal)
if err != nil {
return nil, err
}
r := &dockerImage{
staticDockerfilePath: dockerfilePath,
staticDockerfile: dockerfile.Dockerfile(dockerfileContents),
staticBuildPath: context,
ref: container.NewRefSelector(ref),
staticBuildArgs: sba,
cachePaths: cachePaths,
}
err = s.buildIndex.addImage(r)
if err != nil {
return nil, err
}
// NOTE(maia): docker_build returns a fast build that users can optionally
// populate; if populated, we use it for in-place updates of this image
// (but use the static build defined by docker_build for image builds)
fb := &fastBuild{s: s, img: r}
return fb, nil
}
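// A minimal Tiltfile sketch of the pattern described in the NOTE above
// (illustrative values; the fast-build calls are optional and only drive
// in-place updates):
//
//   fb = docker_build('gcr.io/some-project/app', '.')
//   fb.add('./src', '/app/src')
//   fb.run('pip install -r requirements.txt', trigger='requirements.txt')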
func (s *tiltfileState) fastBuildForImage(image *dockerImage) model.FastBuild {
return model.FastBuild{
BaseDockerfile: image.baseDockerfile.String(),
Mounts: s.mountsToDomain(image),
Steps: image.steps,
Entrypoint: model.ToShellCmd(image.entrypoint),
HotReload: image.hotReload,
}
}
func (s *tiltfileState) maybeFastBuild(image *dockerImage) *model.FastBuild {
fb := s.fastBuildForImage(image)
if fb.Empty() {
return nil
}
return &fb
}
func (s *tiltfileState) customBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var dockerRef string
var command string
var deps *starlark.List
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"ref", &dockerRef,
"command", &command,
"deps", &deps,
)
if err != nil {
return nil, err
}
ref, err := reference.ParseNormalizedNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Argument 1 (ref): can't parse %q: %v", dockerRef, err)
}
if command == "" {
return nil, fmt.Errorf("Argument 2 (command) can't be empty")
}
if deps == nil || deps.Len() == 0 {
return nil, fmt.Errorf("Argument 3 (deps) can't be empty")
}
var localDeps []string
iter := deps.Iterate()
defer iter.Done()
var v starlark.Value
for iter.Next(&v) {
p, err := s.localPathFromSkylarkValue(v)
if err != nil {
return nil, fmt.Errorf("Argument 3 (deps): %v", err)
}
localDeps = append(localDeps, p.path)
}
img := &dockerImage{
ref: container.NewRefSelector(ref),
customCommand: command,
customDeps: localDeps,
}
err = s.buildIndex.addImage(img)
if err != nil {
return nil, err
}
return &customBuild{s: s, img: img}, nil
}
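// Sketch of the corresponding Tiltfile call (illustrative values; deps must
// be a non-empty list, per the validation above; the $EXPECTED_REF tagging
// convention is assumed and not shown in this excerpt):
//
//   custom_build(
//       'gcr.io/some-project/app',
//       'docker build -t $EXPECTED_REF .',
//       ['./src', 'Dockerfile'],
//   )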
type customBuild struct {
s *tiltfileState
img *dockerImage
}
var _ starlark.Value = &customBuild{}
func (b *customBuild) String() string {
return fmt.Sprintf("custom_build(%q)", b.img.ref.String())
}
func (b *customBuild) Type() string {
return "custom_build"
}
func (b *customBuild) Freeze() {}
func (b *customBuild) Truth() starlark.Bool {
return true
}
func (b *customBuild) Hash() (uint32, error) {
return 0, fmt.Errorf("unhashable type: custom_build")
}
const (
addFastBuildN = "add_fast_build"
)
func (b *customBuild) Attr(name string) (starlark.Value, error) {
switch name {
case addFastBuildN:
return starlark.NewBuiltin(name, b.addFastBuild), nil
default:
return starlark.None, nil
}
}
func (b *customBuild) AttrNames() []string {
return []string{addFastBuildN}
}
func (b *customBuild) addFastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
return &fastBuild{s: b.s, img: b.img}, nil
}
func (s *tiltfileState) fastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var dockerRef, entrypoint string
var baseDockerfile starlark.Value
var cacheVal starlark.Value
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"ref", &dockerRef,
"base_dockerfile", &baseDockerfile,
"entrypoint?", &entrypoint,
"cache?", &cacheVal,
)
if err != nil {
return nil, err
}
baseDockerfilePath, err := s.localPathFromSkylarkValue(baseDockerfile)
if err != nil {
return nil, fmt.Errorf("Argument 2 (base_dockerfile): %v", err)
}
ref, err := container.ParseNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Parsing %q: %v", dockerRef, err)
}
bs, err := s.readFile(baseDockerfilePath)
if err != nil {
return nil, errors.Wrap(err, "error reading dockerfile")
}
df := dockerfile.Dockerfile(bs)
if err = df.ValidateBaseDockerfile(); err != nil {
return nil, err | {
return model.ImageID(d.ref)
} | identifier_body |
|
docker.go | Value(contextVal)
if err != nil {
return nil, err
}
var sba map[string]string
if buildArgs != nil {
d, ok := buildArgs.(*starlark.Dict)
if !ok {
return nil, fmt.Errorf("Argument 3 (build_args): expected dict, got %T", buildArgs)
}
sba, err = skylarkStringDictToGoMap(d)
if err != nil {
return nil, fmt.Errorf("Argument 3 (build_args): %v", err)
}
}
dockerfilePath := context.join("Dockerfile")
var dockerfileContents string
if dockerfileContentsVal != nil && dockerfilePathVal != nil {
return nil, fmt.Errorf("Cannot specify both dockerfile and dockerfile_contents keyword arguments")
}
if dockerfileContentsVal != nil {
switch v := dockerfileContentsVal.(type) {
case *blob:
dockerfileContents = v.text
case starlark.String:
dockerfileContents = v.GoString()
default:
return nil, fmt.Errorf("Argument (dockerfile_contents): must be string or blob.")
}
} else if dockerfilePathVal != nil {
dockerfilePath, err = s.localPathFromSkylarkValue(dockerfilePathVal)
if err != nil {
return nil, err
}
bs, err := s.readFile(dockerfilePath)
if err != nil {
return nil, errors.Wrap(err, "error reading dockerfile")
}
dockerfileContents = string(bs)
} else {
bs, err := s.readFile(dockerfilePath)
if err != nil {
return nil, errors.Wrapf(err, "error reading dockerfile")
}
dockerfileContents = string(bs)
}
cachePaths, err := s.cachePathsFromSkylarkValue(cacheVal)
if err != nil {
return nil, err
}
r := &dockerImage{
staticDockerfilePath: dockerfilePath,
staticDockerfile: dockerfile.Dockerfile(dockerfileContents),
staticBuildPath: context,
ref: container.NewRefSelector(ref),
staticBuildArgs: sba,
cachePaths: cachePaths,
}
err = s.buildIndex.addImage(r)
if err != nil {
return nil, err
}
// NOTE(maia): docker_build returns a fast build that users can optionally
// populate; if populated, we use it for in-place updates of this image
// (but use the static build defined by docker_build for image builds)
fb := &fastBuild{s: s, img: r}
return fb, nil
}
func (s *tiltfileState) fastBuildForImage(image *dockerImage) model.FastBuild {
return model.FastBuild{
BaseDockerfile: image.baseDockerfile.String(),
Mounts: s.mountsToDomain(image),
Steps: image.steps,
Entrypoint: model.ToShellCmd(image.entrypoint),
HotReload: image.hotReload,
}
}
func (s *tiltfileState) maybeFastBuild(image *dockerImage) *model.FastBuild {
fb := s.fastBuildForImage(image)
if fb.Empty() {
return nil
}
return &fb
}
func (s *tiltfileState) customBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var dockerRef string
var command string
var deps *starlark.List
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"ref", &dockerRef,
"command", &command,
"deps", &deps,
)
if err != nil {
return nil, err
}
ref, err := reference.ParseNormalizedNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Argument 1 (ref): can't parse %q: %v", dockerRef, err)
}
if command == "" {
return nil, fmt.Errorf("Argument 2 (command) can't be empty")
}
if deps == nil || deps.Len() == 0 {
return nil, fmt.Errorf("Argument 3 (deps) can't be empty")
}
var localDeps []string
iter := deps.Iterate()
defer iter.Done()
var v starlark.Value
for iter.Next(&v) {
p, err := s.localPathFromSkylarkValue(v)
if err != nil {
return nil, fmt.Errorf("Argument 3 (deps): %v", err)
}
localDeps = append(localDeps, p.path)
}
img := &dockerImage{
ref: container.NewRefSelector(ref),
customCommand: command,
customDeps: localDeps,
}
err = s.buildIndex.addImage(img)
if err != nil {
return nil, err
}
return &customBuild{s: s, img: img}, nil
}
type customBuild struct {
s *tiltfileState
img *dockerImage
}
var _ starlark.Value = &customBuild{}
func (b *customBuild) String() string {
return fmt.Sprintf("custom_build(%q)", b.img.ref.String())
}
func (b *customBuild) Type() string {
return "custom_build"
}
func (b *customBuild) Freeze() {}
func (b *customBuild) Truth() starlark.Bool {
return true
}
func (b *customBuild) Hash() (uint32, error) {
return 0, fmt.Errorf("unhashable type: custom_build")
}
const (
addFastBuildN = "add_fast_build"
)
func (b *customBuild) Attr(name string) (starlark.Value, error) {
switch name {
case addFastBuildN:
return starlark.NewBuiltin(name, b.addFastBuild), nil
default:
return starlark.None, nil
}
}
func (b *customBuild) AttrNames() []string {
return []string{addFastBuildN}
}
func (b *customBuild) addFastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
return &fastBuild{s: b.s, img: b.img}, nil
}
func (s *tiltfileState) fastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var dockerRef, entrypoint string
var baseDockerfile starlark.Value
var cacheVal starlark.Value
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"ref", &dockerRef,
"base_dockerfile", &baseDockerfile,
"entrypoint?", &entrypoint,
"cache?", &cacheVal,
)
if err != nil {
return nil, err
}
baseDockerfilePath, err := s.localPathFromSkylarkValue(baseDockerfile)
if err != nil {
return nil, fmt.Errorf("Argument 2 (base_dockerfile): %v", err)
}
ref, err := container.ParseNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Parsing %q: %v", dockerRef, err)
}
bs, err := s.readFile(baseDockerfilePath)
if err != nil {
return nil, errors.Wrap(err, "error reading dockerfile")
}
df := dockerfile.Dockerfile(bs)
if err = df.ValidateBaseDockerfile(); err != nil {
return nil, err
}
cachePaths, err := s.cachePathsFromSkylarkValue(cacheVal)
if err != nil {
return nil, err
}
r := &dockerImage{
baseDockerfilePath: baseDockerfilePath,
baseDockerfile: df,
ref: container.NewRefSelector(ref),
entrypoint: entrypoint,
cachePaths: cachePaths,
}
err = s.buildIndex.addImage(r)
if err != nil {
return nil, err
}
fb := &fastBuild{s: s, img: r}
return fb, nil
}
func (s *tiltfileState) cachePathsFromSkylarkValue(val starlark.Value) ([]string, error) {
if val == nil {
return nil, nil
}
cachePaths := starlarkValueOrSequenceToSlice(val)
var ret []string
for _, v := range cachePaths {
str, ok := v.(starlark.String)
if !ok {
return nil, fmt.Errorf("cache param %v is a %T; must be a string", v, v)
}
ret = append(ret, string(str))
}
return ret, nil
}
| var _ starlark.Value = &fastBuild{}
func (b *fastBuild) String() string {
return fmt.Sprintf("fast_build(%q)", b.img.ref.String())
}
func (b *fastBuild) Type() string {
return "fast_build"
}
func (b *fastBuild) Freeze() {}
func (b *fastBuild) Truth() starlark.Bool {
| type fastBuild struct {
s *tiltfileState
img *dockerImage
}
| random_line_split |
docker.go | () (uint32, error) {
return 0, fmt.Errorf("unhashable type: custom_build")
}
const (
addFastBuildN = "add_fast_build"
)
func (b *customBuild) Attr(name string) (starlark.Value, error) {
switch name {
case addFastBuildN:
return starlark.NewBuiltin(name, b.addFastBuild), nil
default:
return starlark.None, nil
}
}
func (b *customBuild) AttrNames() []string {
return []string{addFastBuildN}
}
func (b *customBuild) addFastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
return &fastBuild{s: b.s, img: b.img}, nil
}
func (s *tiltfileState) fastBuild(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var dockerRef, entrypoint string
var baseDockerfile starlark.Value
var cacheVal starlark.Value
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"ref", &dockerRef,
"base_dockerfile", &baseDockerfile,
"entrypoint?", &entrypoint,
"cache?", &cacheVal,
)
if err != nil {
return nil, err
}
baseDockerfilePath, err := s.localPathFromSkylarkValue(baseDockerfile)
if err != nil {
return nil, fmt.Errorf("Argument 2 (base_dockerfile): %v", err)
}
ref, err := container.ParseNamed(dockerRef)
if err != nil {
return nil, fmt.Errorf("Parsing %q: %v", dockerRef, err)
}
bs, err := s.readFile(baseDockerfilePath)
if err != nil {
return nil, errors.Wrap(err, "error reading dockerfile")
}
df := dockerfile.Dockerfile(bs)
if err = df.ValidateBaseDockerfile(); err != nil {
return nil, err
}
cachePaths, err := s.cachePathsFromSkylarkValue(cacheVal)
if err != nil {
return nil, err
}
r := &dockerImage{
baseDockerfilePath: baseDockerfilePath,
baseDockerfile: df,
ref: container.NewRefSelector(ref),
entrypoint: entrypoint,
cachePaths: cachePaths,
}
err = s.buildIndex.addImage(r)
if err != nil {
return nil, err
}
fb := &fastBuild{s: s, img: r}
return fb, nil
}
func (s *tiltfileState) cachePathsFromSkylarkValue(val starlark.Value) ([]string, error) {
if val == nil {
return nil, nil
}
cachePaths := starlarkValueOrSequenceToSlice(val)
var ret []string
for _, v := range cachePaths {
str, ok := v.(starlark.String)
if !ok {
return nil, fmt.Errorf("cache param %v is a %T; must be a string", v, v)
}
ret = append(ret, string(str))
}
return ret, nil
}
type fastBuild struct {
s *tiltfileState
img *dockerImage
}
var _ starlark.Value = &fastBuild{}
func (b *fastBuild) String() string {
return fmt.Sprintf("fast_build(%q)", b.img.ref.String())
}
func (b *fastBuild) Type() string {
return "fast_build"
}
func (b *fastBuild) Freeze() {}
func (b *fastBuild) Truth() starlark.Bool {
return true
}
func (b *fastBuild) Hash() (uint32, error) {
return 0, fmt.Errorf("unhashable type: fast_build")
}
const (
addN = "add"
runN = "run"
hotReloadN = "hot_reload"
)
func (b *fastBuild) Attr(name string) (starlark.Value, error) {
switch name {
case addN:
return starlark.NewBuiltin(name, b.add), nil
case runN:
return starlark.NewBuiltin(name, b.run), nil
case hotReloadN:
return starlark.NewBuiltin(name, b.hotReload), nil
default:
return starlark.None, nil
}
}
func (b *fastBuild) AttrNames() []string {
return []string{addN, runN, hotReloadN}
}
func (b *fastBuild) hotReload(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if err := starlark.UnpackArgs(fn.Name(), args, kwargs); err != nil {
return nil, err
}
b.img.hotReload = true
return b, nil
}
func (b *fastBuild) add(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(b.img.steps) > 0 {
return nil, fmt.Errorf("fast_build(%q).add() called after .run(); must add all code before runs", b.img.ref.String())
}
var src starlark.Value
var mountPoint string
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "src", &src, "dest", &mountPoint); err != nil {
return nil, err
}
m := mount{}
lp, err := b.s.localPathFromSkylarkValue(src)
if err != nil {
return nil, errors.Wrapf(err, "%s.%s(): invalid type for src (arg 1)", b.String(), fn.Name())
}
m.src = lp
m.mountPoint = mountPoint
b.img.mounts = append(b.img.mounts, m)
return b, nil
}
func (b *fastBuild) run(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var cmd string
var trigger starlark.Value
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "cmd", &cmd, "trigger?", &trigger); err != nil {
return nil, err
}
var triggers []string
switch trigger := trigger.(type) {
case *starlark.List:
l := trigger.Len()
triggers = make([]string, l)
for i := 0; i < l; i++ {
t := trigger.Index(i)
tStr, isStr := t.(starlark.String)
if !isStr {
return nil, badTypeErr(fn, starlark.String(""), t)
}
triggers[i] = string(tStr)
}
case starlark.String:
triggers = []string{string(trigger)}
}
step := model.ToStep(b.s.absWorkingDir(), model.ToShellCmd(cmd))
step.Triggers = triggers
b.img.steps = append(b.img.steps, step)
return b, nil
}
type mount struct {
src localPath
mountPoint string
}
func (s *tiltfileState) mountsToDomain(image *dockerImage) []model.Mount {
var result []model.Mount
for _, m := range image.mounts {
result = append(result, model.Mount{LocalPath: m.src.path, ContainerPath: m.mountPoint})
}
return result
}
func reposForPaths(paths []localPath) []model.LocalGitRepo {
var result []model.LocalGitRepo
repoSet := map[string]bool{}
for _, path := range paths {
repo := path.repo
if repo == nil || repoSet[repo.basePath] {
continue
}
repoSet[repo.basePath] = true
result = append(result, model.LocalGitRepo{
LocalPath: repo.basePath,
GitignoreContents: repo.gitignoreContents,
})
}
return result
}
func (s *tiltfileState) reposForImage(image *dockerImage) []model.LocalGitRepo {
var paths []localPath
for _, m := range image.mounts {
paths = append(paths, m.src)
}
paths = append(paths,
image.baseDockerfilePath,
image.staticDockerfilePath,
image.staticBuildPath,
s.filename)
return reposForPaths(paths)
}
func dockerignoresForPaths(paths []string) []model.Dockerignore {
var result []model.Dockerignore
dupeSet := map[string]bool{}
for _, path := range paths {
if path == "" || dupeSet[path] {
continue
}
dupeSet[path] = true
if !ospath.IsDir(path) {
continue
}
contents, err := ioutil.ReadFile(filepath.Join(path, ".dockerignore"))
if err != nil {
continue
}
result = append(result, model.Dockerignore{
LocalPath: path,
Contents: string(contents),
})
}
return result
}
func (s *tiltfileState) | dockerignoresForImage | identifier_name |
|
TextRank.py | (z))
return weight
def get_wordnet_pos(self, treebank_tag):
if treebank_tag.startswith('N'):
return wn.NOUN
elif treebank_tag.startswith('J'):
return wn.ADJ
elif treebank_tag.startswith('R'):
return wn.ADV
else:
return ''
def buildGraph(self, sentences):
g = nx.DiGraph()
wordList = defaultdict(set) | #tokens = nltk.word_tokenize(s.translate(self.tbl))
tokens = nltk.word_tokenize(s.translate(string.punctuation))
tags = nltk.pos_tag(tokens)
print(tags)
wid = 0
for ws in tags:
z = ws[0].rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if len(z) > 0:
if ws[0] not in self.excludeSet:
w = z.lower()
pos = ws[1]
poswn = self.get_wordnet_pos(pos)
if poswn: # only accept nouns, adjectives, and adverbs
myWord = self.lmtzr.lemmatize(w, poswn)
wsynset = wn.synsets(myWord, poswn)
s1 = Sample()
word_id = str(wid) + '#'+str(sid)
s1.name = str(myWord)
if len(wsynset) > 0 :
wlemmas = wsynset[0].lemmas()
for wl in wlemmas:
s1.lemma_names.add(str(wl.name()))
#print(s1.lemma_names)
if s1.name not in wordList:
wordList[s1.name] = s1.lemma_names #global
ids.add((word_id,s1)) #local --> for each sentence
g.add_node(s1.name)
wid += 1
windowRange = 4
for x in ids :
for y in ids :
if x[0] != y[0] : # not the same word
idx = x[0]
idy = y[0]
partx = x[0].split('#')
party = y[0].split('#')
if abs(int(partx[0]) - int(party[0])) < windowRange :
g.add_edge(x[1].name,y[1].name, weight = 0.01)
g.add_edge(y[1].name, x[1].name, weight = 0.01)
sz = g.number_of_edges()
if sz == 0:
zs = 0
else:
zs = float(1.0/float(sz))
wordConsidered = set()
for v1 in wordList.keys() :
for v2 in wordList.keys() :
if v1 != v2:
set1 = wordList[v1]
set2 = wordList[v2]
pair = (v1,v2)
pairr = (v2,v1)
if (pair not in wordConsidered) :
wordConsidered.add(pair)
wordConsidered.add(pairr)
similarity = self.findSimilarity(set1,set2)
if similarity > 0.000 :
if g.has_edge(v1,v2) :
g.edge[v1][v2]['weight'] += zs * similarity
g.edge[v2][v1]['weight'] += zs * similarity
else :
g.add_edge(v1,v2, weight = zs * similarity)
g.add_edge(v2, v1, weight = zs * similarity)
#print(wordList)
#print(len(wordList))
#print(g.number_of_nodes())
return (g, len(wordList))
def applyTextRank(self, g):
pr = nx.pagerank_scipy(g, alpha=0.85, max_iter=100000, weight = 'weight')
return pr
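# Minimal self-contained sketch of the PageRank step above (toy graph, not
# corpus data; same networkx call and weight key):
#   import networkx as nx
#   g = nx.DiGraph()
#   g.add_edge("model", "data", weight=0.5)
#   g.add_edge("data", "model", weight=0.5)
#   nx.pagerank_scipy(g, alpha=0.85, weight='weight')
#   # -> {'model': 0.5, 'data': 0.5} by symmetry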
def constructSentences(self, sentences, pg, limit):
sentenceList = []
#words = sorted(pg.items(), key= lambda x: x[1], reverse=True)
totalWeight = 0.00
for w in pg:
totalWeight += pg[w]
g_nodes = len(pg.keys())
#print(' Total Weight:: ', totalWeight)
#print(' Total Nodes:: ', g_nodes)
for sindex, s in enumerate(sentences) :
xs = SentenceSample()
xs.ssen = s
xs.senIndex = sindex
s_weight = 0.00
s_nodes = 0
s =s.lower()
tokens = nltk.word_tokenize(s.translate(string.punctuation))
for n in tokens :
z = n.rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if z in pg.keys() :
s_weight += math.fabs(pg[z])
s_nodes += 1
if s_nodes > 0 and s_weight > 0.00 :
xs.matchId = (s_weight * float(g_nodes)) / ( float(s_nodes) * totalWeight)
# xs.matchId = s_weight / float(s_nodes)
else :
xs.matchId = 0.00
sentenceList.append(xs)
sentenceList = sorted(sentenceList, key=lambda ps1: ps1.matchId, reverse = True)
topSentences = sentenceList[:limit]
topSentences = sorted(topSentences, key = lambda ps1: ps1.senIndex, reverse = False)
ss = ''
for t in topSentences:
t.ssen = t.ssen.rstrip('\n')
ss = ss + ' ' + t.ssen.lower()
return (topSentences, ss)
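# The matchId normalization above can be read as a ratio of averages:
#   matchId = (s_weight / s_nodes) / (totalWeight / g_nodes)
# i.e. the sentence's mean PageRank mass per matched word, divided by the
# graph-wide mean weight per node, so matchId > 1 marks a sentence whose
# words rank above the graph average.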
def getLemmas(self, setA):
setB = set()
stemmer = SnowballStemmer("english")
setA = set(setA).difference(self.excludeSet)
for a in setA:
print(a)
xss = re.split(r'-', a)
if len(xss) > 1:
#input()
#print(xss)
for xs in xss:
setB.add(stemmer.stem(xs))
else:
setB.add(stemmer.stem(a))
return setB
'''
def compareAbstract(self, summary_sentences, abs_sentences, n, fname):
precision = 0
recall = 0
avgO = 0
i = 0
i_measure = 0
#print('Abstract of ', fname)
tokens = set(nltk.word_tokenize(abs_sentences.translate(string.punctuation)))
tokens = tokens.difference(self.excludeSet)
atokens = self.getLemmas(tokens)
#print(atokens)
k = len(atokens)
trTokens = set(nltk.word_tokenize(summary_sentences.translate(string.punctuation)))
trTokens = trTokens.difference(self.excludeSet)
atrTokens = self.getLemmas(trTokens)
#print(atrTokens)
l = len(atrTokens)
AB = atokens.intersection(atrTokens)
#print(AB)
i = len(AB)
if n > 0:
precision = float(i)/float(l)
recall = float(i)/float(k)
avg_random = float(float(k * l) / float( n ))
i_measure = float(i)/ avg_random
print('P: ', precision, ' R: ', recall, ' i_measure: ', i_measure)
return (precision,recall,n,k,l, avg_random, i, i_measure)
else:
return (0,0,0,0,0,0,0,0)
'''
def writeSummary(self, summary_sentences, pr, fname):
rem = open(self.writeSumm + '/' + fname, 'w')
rem.write(summary_sentences)
rem.close()
#pfname = fname.rsplit('.txt', 1)[0] + '.pdf'
#os.system('cp '+ self.pdfP +'/'+ pfname + ' ' + dir + '/'+ pfname)
#os.system('cp ' + self.fileP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_FULLTEXT.txt')
#os.system('cp ' + self.abstractP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_ABSTRACT.txt')
#os.system('cp ' + self.keP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_KE.txt')
#os.system('cp ' + self.readP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_NO_ABS.txt')
#rem = open(dir+'/summary_statistics.txt', 'w')
#rem.write('Precision:: '+ str(round(tuple[0],2))+'\n')
| for sid, s in enumerate(sentences):
s = s.rstrip('\n')
#print(sid, '>>', s)
ids = set()
s = s.lower() | random_line_split |
TextRank.py | (z))
return weight
def get_wordnet_pos(self, treebank_tag):
if treebank_tag.startswith('N'):
return wn.NOUN
elif treebank_tag.startswith('J'):
|
elif treebank_tag.startswith('R'):
return wn.ADV
else:
return ''
def buildGraph(self, sentences):
g = nx.DiGraph()
wordList = defaultdict(set)
for sid, s in enumerate(sentences):
s = s.rstrip('\n')
#print(sid, '>>', s)
ids = set()
s = s.lower()
#tokens = nltk.word_tokenize(s.translate(self.tbl))
tokens = nltk.word_tokenize(s.translate(string.punctuation))
tags = nltk.pos_tag(tokens)
print(tags)
wid = 0
for ws in tags:
z = ws[0].rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if len(z) > 0:
if ws[0] not in self.excludeSet:
w = z.lower()
pos = ws[1]
poswn = self.get_wordnet_pos(pos)
if poswn: # only accept nouns, adjectives, and adverbs
myWord = self.lmtzr.lemmatize(w, poswn)
wsynset = wn.synsets(myWord, poswn)
s1 = Sample()
word_id = str(wid) + '#'+str(sid)
s1.name = str(myWord)
if len(wsynset) > 0 :
wlemmas = wsynset[0].lemmas()
for wl in wlemmas:
s1.lemma_names.add(str(wl.name()))
#print(s1.lemma_names)
if s1.name not in wordList:
wordList[s1.name] = s1.lemma_names #global
ids.add((word_id,s1)) #local --> for each sentence
g.add_node(s1.name)
wid += 1
windowRange = 4
for x in ids :
for y in ids :
if x[0] != y[0] : # not the same word
idx = x[0]
idy = y[0]
partx = x[0].split('#')
party = y[0].split('#')
if abs(int(partx[0]) - int(party[0])) < windowRange :
g.add_edge(x[1].name,y[1].name, weight = 0.01)
g.add_edge(y[1].name, x[1].name, weight = 0.01)
sz = g.number_of_edges()
if sz == 0:
zs = 0
else:
zs = float(1.0/float(sz))
wordConsidered = set()
for v1 in wordList.keys() :
for v2 in wordList.keys() :
if v1 != v2:
set1 = wordList[v1]
set2 = wordList[v2]
pair = (v1,v2)
pairr = (v2,v1)
if (pair not in wordConsidered) :
wordConsidered.add(pair)
wordConsidered.add(pairr)
similarity = self.findSimilarity(set1,set2)
if similarity > 0.000 :
if g.has_edge(v1,v2) :
g.edge[v1][v2]['weight'] += zs * similarity
g.edge[v2][v1]['weight'] += zs * similarity
else :
g.add_edge(v1,v2, weight = zs * similarity)
g.add_edge(v2, v1, weight = zs * similarity)
#print(wordList)
#print(len(wordList))
#print(g.number_of_nodes())
return (g, len(wordList))
def applyTextRank(self, g):
pr = nx.pagerank_scipy(g, alpha=0.85, max_iter=100000, weight = 'weight')
return pr
def constructSentences(self, sentences, pg, limit):
sentenceList = []
#words = sorted(pg.items(), key= lambda x: x[1], reverse=True)
totalWeight = 0.00
for w in pg:
totalWeight += pg[w]
g_nodes = len(pg.keys())
#print(' Total Weight:: ', totalWeight)
#print(' Total Nodes:: ', g_nodes)
for sindex, s in enumerate(sentences) :
xs = SentenceSample()
xs.ssen = s
xs.senIndex = sindex
s_weight = 0.00
s_nodes = 0
s =s.lower()
tokens = nltk.word_tokenize(s.translate(string.punctuation))
for n in tokens :
z = n.rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if z in pg.keys() :
s_weight += math.fabs(pg[z])
s_nodes += 1
if s_nodes > 0 and s_weight > 0.00 :
xs.matchId = (s_weight * float(g_nodes)) / ( float(s_nodes) * totalWeight)
# xs.matchId = s_weight / float(s_nodes)
else :
xs.matchId = 0.00
sentenceList.append(xs)
sentenceList = sorted(sentenceList, key=lambda ps1: ps1.matchId, reverse = True)
topSentences = sentenceList[:limit]
topSentences = sorted(topSentences, key = lambda ps1: ps1.senIndex, reverse = False)
ss = ''
for t in topSentences:
t.ssen = t.ssen.rstrip('\n')
ss = ss + ' ' + t.ssen.lower()
return (topSentences, ss)
def getLemmas(self, setA):
setB = set()
stemmer = SnowballStemmer("english")
setA = set(setA).difference(self.excludeSet)
for a in setA:
print(a)
xss = re.split(r'-', a)
if len(xss) > 1:
#input()
#print(xss)
for xs in xss:
setB.add(stemmer.stem(xs))
else:
setB.add(stemmer.stem(a))
return setB
'''
def compareAbstract(self, summary_sentences, abs_sentences, n, fname):
precision = 0
recall = 0
avgO = 0
i = 0
i_measure = 0
#print('Abstract of ', fname)
tokens = set(nltk.word_tokenize(abs_sentences.translate(string.punctuation)))
tokens = tokens.difference(self.excludeSet)
atokens = self.getLemmas(tokens)
#print(atokens)
k = len(atokens)
trTokens = set(nltk.word_tokenize(summary_sentences.translate(string.punctuation)))
trTokens = trTokens.difference(self.excludeSet)
atrTokens = self.getLemmas(trTokens)
#print(atrTokens)
l = len(atrTokens)
AB = atokens.intersection(atrTokens)
#print(AB)
i = len(AB)
if n > 0:
precision = float(i)/float(l)
recall = float(i)/float(k)
avg_random = float(float(k * l) / float( n ))
i_measure = float(i)/ avg_random
print('P: ', precision, ' R: ', recall, ' i_measure: ', i_measure)
return (precision,recall,n,k,l, avg_random, i, i_measure)
else:
return (0,0,0,0,0,0,0,0)
'''
def writeSummary(self, summary_sentences, pr, fname):
rem = open(self.writeSumm + '/' + fname, 'w')
rem.write(summary_sentences)
rem.close()
#pfname = fname.rsplit('.txt', 1)[0] + '.pdf'
#os.system('cp '+ self.pdfP +'/'+ pfname + ' ' + dir + '/'+ pfname)
#os.system('cp ' + self.fileP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_FULLTEXT.txt')
#os.system('cp ' + self.abstractP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_ABSTRACT.txt')
#os.system('cp ' + self.keP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_KE.txt')
#os.system('cp ' + self.readP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_NO_ABS.txt')
#rem = open(dir+'/summary_statistics.txt', 'w')
#rem.write('Precision:: '+ str(round(tuple[0],2))+'\n')
| return wn.ADJ | conditional_block |
TextRank.py | (z))
return weight
def get_wordnet_pos(self, treebank_tag):
if treebank_tag.startswith('N'):
return wn.NOUN
elif treebank_tag.startswith('J'):
return wn.ADJ
elif treebank_tag.startswith('R'):
return wn.ADV
else:
return ''
def buildGraph(self, sentences):
g = nx.DiGraph()
wordList = defaultdict(set)
for sid, s in enumerate(sentences):
s = s.rstrip('\n')
#print(sid, '>>', s)
ids = set()
s = s.lower()
#tokens = nltk.word_tokenize(s.translate(self.tbl))
tokens = nltk.word_tokenize(s.translate(string.punctuation))
tags = nltk.pos_tag(tokens)
print(tags)
wid = 0
for ws in tags:
z = ws[0].rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if len(z) > 0:
if ws[0] not in self.excludeSet:
w = z.lower()
pos = ws[1]
poswn = self.get_wordnet_pos(pos)
if poswn: # only accept nouns, adjectives, and adverbs
myWord = self.lmtzr.lemmatize(w, poswn)
wsynset = wn.synsets(myWord, poswn)
s1 = Sample()
word_id = str(wid) + '#'+str(sid)
s1.name = str(myWord)
if len(wsynset) > 0 :
wlemmas = wsynset[0].lemmas()
for wl in wlemmas:
s1.lemma_names.add(str(wl.name()))
#print(s1.lemma_names)
if s1.name not in wordList:
wordList[s1.name] = s1.lemma_names #global
ids.add((word_id,s1)) #local --> for each sentence
g.add_node(s1.name)
wid += 1
windowRange = 4
for x in ids :
for y in ids :
if x[0] != y[0] : # not the same word
idx = x[0]
idy = y[0]
partx = x[0].split('#')
party = y[0].split('#')
if abs(int(partx[0]) - int(party[0])) < windowRange :
g.add_edge(x[1].name,y[1].name, weight = 0.01)
g.add_edge(y[1].name, x[1].name, weight = 0.01)
sz = g.number_of_edges()
if sz == 0:
zs = 0
else:
zs = float(1.0/float(sz))
wordConsidered = set()
for v1 in wordList.keys() :
for v2 in wordList.keys() :
if v1 != v2:
set1 = wordList[v1]
set2 = wordList[v2]
pair = (v1,v2)
pairr = (v2,v1)
if (pair not in wordConsidered) :
wordConsidered.add(pair)
wordConsidered.add(pairr)
similarity = self.findSimilarity(set1,set2)
if similarity > 0.000 :
if g.has_edge(v1,v2) :
g.edge[v1][v2]['weight'] += zs * similarity
g.edge[v2][v1]['weight'] += zs * similarity
else :
g.add_edge(v1,v2, weight = zs * similarity)
g.add_edge(v2, v1, weight = zs * similarity)
#print(wordList)
#print(len(wordList))
#print(g.number_of_nodes())
return (g, len(wordList))
def applyTextRank(self, g):
pr = nx.pagerank_scipy(g, alpha=0.85, max_iter=100000, weight = 'weight')
return pr
def constructSentences(self, sentences, pg, limit):
sentenceList = []
#words = sorted(pg.items(), key= lambda x: x[1], reverse=True)
totalWeight = 0.00
for w in pg:
totalWeight += pg[w]
g_nodes = len(pg.keys())
#print(' Total Weight:: ', totalWeight)
#print(' Total Nodes:: ', g_nodes)
for sindex, s in enumerate(sentences) :
xs = SentenceSample()
xs.ssen = s
xs.senIndex = sindex
s_weight = 0.00
s_nodes = 0
s =s.lower()
tokens = nltk.word_tokenize(s.translate(string.punctuation))
for n in tokens :
z = n.rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if z in pg.keys() :
s_weight += math.fabs(pg[z])
s_nodes += 1
if s_nodes > 0 and s_weight > 0.00 :
xs.matchId = (s_weight * float(g_nodes)) / ( float(s_nodes) * totalWeight)
# xs.matchId = s_weight / float(s_nodes)
else :
xs.matchId = 0.00
sentenceList.append(xs)
sentenceList = sorted(sentenceList, key=lambda ps1: ps1.matchId, reverse = True)
topSentences = sentenceList[:limit]
topSentences = sorted(topSentences, key = lambda ps1: ps1.senIndex, reverse = False)
ss = ''
for t in topSentences:
t.ssen = t.ssen.rstrip('\n')
ss = ss + ' ' + t.ssen.lower()
return (topSentences, ss)
def getLemmas(self, setA):
|
'''
def compareAbstract(self, summary_sentences, abs_sentences, n, fname):
precision = 0
recall = 0
avgO = 0
i = 0
i_measure = 0
#print('Abstract of ', fname)
tokens = set(nltk.word_tokenize(abs_sentences.translate(string.punctuation)))
tokens = tokens.difference(self.excludeSet)
atokens = self.getLemmas(tokens)
#print(atokens)
k = len(atokens)
trTokens = set(nltk.word_tokenize(summary_sentences.translate(string.punctuation)))
trTokens = trTokens.difference(self.excludeSet)
atrTokens = self.getLemmas(trTokens)
#print(atrTokens)
l = len(atrTokens)
AB = atokens.intersection(atrTokens)
#print(AB)
i = len(AB)
if n > 0:
precision = float(i)/float(l)
recall = float(i)/float(k)
avg_random = float(float(k * l) / float( n ))
i_measure = float(i)/ avg_random
print('P: ', precision, ' R: ', recall, ' i_measure: ', i_measure)
return (precision,recall,n,k,l, avg_random, i, i_measure)
else:
return (0,0,0,0,0,0,0,0)
'''
def writeSummary(self, summary_sentences, pr, fname):
rem = open(self.writeSumm + '/' + fname, 'w')
rem.write(summary_sentences)
rem.close()
#pfname = fname.rsplit('.txt', 1)[0] + '.pdf'
#os.system('cp '+ self.pdfP +'/'+ pfname + ' ' + dir + '/'+ pfname)
#os.system('cp ' + self.fileP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_FULLTEXT.txt')
#os.system('cp ' + self.abstractP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_ABSTRACT.txt')
#os.system('cp ' + self.keP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_KE.txt')
#os.system('cp ' + self.readP +'/' + fname + ' ' + dir + '/' + fname.rsplit('.txt', 1)[0]+'_NO_ABS.txt')
#rem = open(dir+'/summary_statistics.txt', 'w')
#rem.write('Precision:: '+ str(round(tuple[0],2))+'\n')
| setB = set()
stemmer = SnowballStemmer("english")
setA = set(setA).difference(self.excludeSet)
for a in setA:
print(a)
xss = re.split(r'-', a)
if len(xss) > 1:
#input()
#print(xss)
for xs in xss:
setB.add(stemmer.stem(xs))
else:
setB.add(stemmer.stem(a))
return setB | identifier_body |
TextRank.py | :
ssen = ''
weight = 0.000
senIndex = 0
class TextRank:
def __init__(self, pathList):
self.engStopWords = set(stopwords.words('english'))
self.excludeSet = set(string.punctuation)
self.excludeSet = self.excludeSet.union(self.engStopWords)
extra = set(['also', 'e.g.', 'etc', 'et al.', 'et'])
self.excludeSet = self.excludeSet.union(extra)
self.lmtzr = WordNetLemmatizer()
self.stemmer = SnowballStemmer("english")
self.readP = pathList[2]
self.abstractP = pathList[0]
self.keP = pathList[1]
self.writeSumm = pathList[3]
if not os.path.exists(self.writeSumm):
os.makedirs(self.writeSumm)
self.writeKE = pathList[4]
if not os.path.exists(self.writeKE):
os.makedirs(self.writeKE)
def findSimilarity(self, x,y):
if(len(x) == 0 or len(y) == 0) :
return 0
else :
z = x.intersection(y)
weight = len(z)/ (len(x) + len(y) - len(z))
return weight
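# The ratio above is the Jaccard index |x ∩ y| / |x ∪ y|, since
# len(x) + len(y) - len(z) = |x ∪ y|. A quick worked example with
# hypothetical lemma sets:
#   x = {"car", "auto", "machine"}, y = {"auto", "machine", "motor"}
#   z = {"auto", "machine"}  ->  2 / (3 + 3 - 2) = 0.5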
def get_wordnet_pos(self, treebank_tag):
if treebank_tag.startswith('N'):
return wn.NOUN
elif treebank_tag.startswith('J'):
return wn.ADJ
elif treebank_tag.startswith('R'):
return wn.ADV
else:
return ''
def buildGraph(self, sentences):
g = nx.DiGraph()
wordList = defaultdict(set)
for sid, s in enumerate(sentences):
s = s.rstrip('\n')
#print(sid, '>>', s)
ids = set()
s = s.lower()
#tokens = nltk.word_tokenize(s.translate(self.tbl))
tokens = nltk.word_tokenize(s.translate(string.punctuation))
tags = nltk.pos_tag(tokens)
print(tags)
wid = 0
for ws in tags:
z = ws[0].rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if len(z) > 0:
if ws[0] not in self.excludeSet:
w = z.lower()
pos = ws[1]
poswn = self.get_wordnet_pos(pos)
if poswn: # only accept nouns, adjectives, and adverbs
myWord = self.lmtzr.lemmatize(w, poswn)
wsynset = wn.synsets(myWord, poswn)
s1 = Sample()
word_id = str(wid) + '#'+str(sid)
s1.name = str(myWord)
if len(wsynset) > 0 :
wlemmas = wsynset[0].lemmas()
for wl in wlemmas:
s1.lemma_names.add(str(wl.name()))
#print(s1.lemma_names)
if s1.name not in wordList:
wordList[s1.name] = s1.lemma_names #global
ids.add((word_id,s1)) #local --> for each sentence
g.add_node(s1.name)
wid += 1
windowRange = 4
for x in ids :
for y in ids :
if x[0] != y[0] : # not the same word
idx = x[0]
idy = y[0]
partx = x[0].split('#')
party = y[0].split('#')
if abs(int(partx[0]) - int(party[0])) < windowRange :
g.add_edge(x[1].name,y[1].name, weight = 0.01)
g.add_edge(y[1].name, x[1].name, weight = 0.01)
sz = g.number_of_edges()
if sz == 0:
zs = 0
else:
zs = float(1.0/float(sz))
wordConsidered = set()
for v1 in wordList.keys() :
for v2 in wordList.keys() :
if v1 != v2:
set1 = wordList[v1]
set2 = wordList[v2]
pair = (v1,v2)
pairr = (v2,v1)
if (pair not in wordConsidered) :
wordConsidered.add(pair)
wordConsidered.add(pairr)
similarity = self.findSimilarity(set1,set2)
if similarity > 0.000 :
if g.has_edge(v1,v2) :
g.edge[v1][v2]['weight'] += zs * similarity
g.edge[v2][v1]['weight'] += zs * similarity
else :
g.add_edge(v1,v2, weight = zs * similarity)
g.add_edge(v2, v1, weight = zs * similarity)
#print(wordList)
#print(len(wordList))
#print(g.number_of_nodes())
return (g, len(wordList))
def applyTextRank(self, g):
pr = nx.pagerank_scipy(g, alpha=0.85, max_iter=100000, weight = 'weight')
return pr
def constructSentences(self, sentences, pg, limit):
sentenceList = []
#words = sorted(pg.items(), key= lambda x: x[1], reverse=True)
totalWeight = 0.00
for w in pg:
totalWeight += pg[w]
g_nodes = len(pg.keys())
#print(' Total Weight:: ', totalWeight)
#print(' Total Nodes:: ', g_nodes)
for sindex, s in enumerate(sentences) :
xs = SentenceSample()
xs.ssen = s
xs.senIndex = sindex
s_weight = 0.00
s_nodes = 0
s =s.lower()
tokens = nltk.word_tokenize(s.translate(string.punctuation))
for n in tokens :
z = n.rstrip('\'\"-,.:;!?()[]{}\+')
z = z.lstrip('\'\"-,.:;!?()[]{}\+')
if z in pg.keys() :
s_weight += math.fabs(pg[z])
s_nodes += 1
if s_nodes > 0 and s_weight > 0.00 :
xs.matchId = (s_weight * float(g_nodes)) / ( float(s_nodes) * totalWeight)
# xs.matchId = s_weight / float(s_nodes)
else :
xs.matchId = 0.00
sentenceList.append(xs)
sentenceList = sorted(sentenceList, key=lambda ps1: ps1.matchId, reverse = True)
topSentences = sentenceList[:limit]
topSentences = sorted(topSentences, key = lambda ps1: ps1.senIndex, reverse = False)
ss = ''
for t in topSentences:
t.ssen = t.ssen.rstrip('\n')
ss = ss + ' ' + t.ssen.lower()
return (topSentences, ss)
def getLemmas(self, setA):
setB = set()
stemmer = SnowballStemmer("english")
setA = set(setA).difference(self.excludeSet)
for a in setA:
print(a)
xss = re.split(r'-', a)
if len(xss) > 1:
#input()
#print(xss)
for xs in xss:
setB.add(stemmer.stem(xs))
else:
setB.add(stemmer.stem(a))
return setB
'''
def compareAbstract(self, summary_sentences, abs_sentences, n, fname):
precision = 0
recall = 0
avgO = 0
i = 0
i_measure = 0
#print('Abstract of ', fname)
tokens = set(nltk.word_tokenize(abs_sentences.translate(string.punctuation)))
tokens = tokens.difference(self.excludeSet)
atokens = self.getLemmas(tokens)
#print(atokens)
k = len(atokens)
trTokens = set(nltk.word_tokenize(summary_sentences.translate(string.punctuation)))
trTokens = trTokens.difference(self.excludeSet)
atrTokens = self.getLemmas(trTokens)
#print(atrTokens)
l = len(atrTokens)
AB = atokens.intersection(atrTokens)
#print(AB)
i = len(AB)
if n > 0:
precision = float(i)/float(l)
recall = float(i)/float(k)
avg_random = float(float(k * l) / float( n ))
i_measure = float(i)/ avg_random
print('P: ', precision, ' R: ', recall, ' i_measure: ', i_measure)
return (precision,recall,n,k,l, avg_random, i, i_measure)
else:
return (0,0,0,0,0,0,0,0)
| SentenceSample | identifier_name |
|
bos.go | lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize
if partNums == 0 {
body, err := bce.NewBodyFromSizedReader(r, lastSlice)
if err != nil {
return errors.Wrapf(err, "failed to create SizedReader for %s", name)
}
if _, err := b.client.PutObject(b.name, name, body, nil); err != nil {
return errors.Wrapf(err, "failed to upload %s", name)
}
return nil
}
result, err := b.client.BasicInitiateMultipartUpload(b.name, name)
if err != nil {
return errors.Wrapf(err, "failed to initiate MultipartUpload for %s", name)
}
uploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) {
body, err := bce.NewBodyFromSizedReader(r, partSize)
if err != nil {
return "", err
}
etag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil)
if err != nil {
if err := b.client.AbortMultipartUpload(b.name, name, uploadId); err != nil {
return etag, err
}
return etag, err
}
return etag, nil
}
var parts []api.UploadInfoType
for part := 1; part <= partNums; part++ {
etag, err := uploadEveryPart(partSize, part, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload part %d for %s", part, name)
}
parts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag})
}
if lastSlice != 0 {
etag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload the last part for %s", name)
}
parts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag})
}
if _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil {
return errors.Wrapf(err, "failed to set %s upload completed", name)
}
return nil
}
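// Worked example of the part arithmetic above, assuming the package-level
// partSize of 128 MiB (defined outside this excerpt): a 300 MiB object gives
// partNums = 2 full parts and a 44 MiB lastSlice, so parts 1-3 are uploaded
// before CompleteMultipartUploadFromStruct seals the object.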
// Iter calls f for each entry in the given directory (non-recursive unless a
// recursive iter option is applied). The argument to f is the full object name
// including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt ...objstore.IterOption) error {
if dir != "" {
dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim
}
delimiter := objstore.DirDelim
if objstore.ApplyIterOptions(opt...).Recursive {
delimiter = ""
}
var marker string
for {
if err := ctx.Err(); err != nil {
return err
}
objects, err := b.client.ListObjects(b.name, &api.ListObjectsArgs{
Delimiter: delimiter,
Marker: marker,
MaxKeys: 1000,
Prefix: dir,
})
if err != nil {
return err
}
marker = objects.NextMarker
for _, object := range objects.Contents {
if err := f(object.Key); err != nil {
return err
}
}
for _, object := range objects.CommonPrefixes {
if err := f(object.Prefix); err != nil {
return err
}
}
if !objects.IsTruncated {
break
}
}
return nil
}
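// Sketch of a recursive listing with Iter (hypothetical bucket value;
// assumes the objstore package's WithRecursiveIter option, which clears the
// delimiter so nested keys are visited too):
//
//   err := bkt.Iter(ctx, "chunks/", func(name string) error {
//       fmt.Println(name)
//       return nil
//   }, objstore.WithRecursiveIter)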
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, off, length)
}
// Exists checks if the given object exists in the bucket.
func (b *Bucket) Exists(_ context.Context, name string) (bool, error) {
_, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrapf(err, "getting object metadata of %s", name)
}
return true, nil
}
func (b *Bucket) Close() error {
return nil
}
// ObjectSize returns the size of the specified object.
func (b *Bucket) ObjectSize(_ context.Context, name string) (uint64, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return 0, err
}
return uint64(objMeta.ContentLength), nil
}
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return objstore.ObjectAttributes{}, errors.Wrapf(err, "gettting objectmeta of %s", name)
}
lastModified, err := time.Parse(time.RFC1123, objMeta.LastModified)
if err != nil {
return objstore.ObjectAttributes{}, err
}
return objstore.ObjectAttributes{
Size: objMeta.ContentLength,
LastModified: lastModified,
}, nil
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
switch bosErr := errors.Cause(err).(type) {
case *bce.BceServiceError:
if bosErr.StatusCode == http.StatusNotFound || bosErr.Code == "NoSuchKey" {
return true
}
}
return false
}
func (b *Bucket) getRange(_ context.Context, bucketName, objectKey string, off, length int64) (io.ReadCloser, error) {
if len(objectKey) == 0 {
return nil, errors.Errorf("given object name should not empty")
}
ranges := []int64{off}
if length != -1 {
ranges = append(ranges, off+length-1)
}
obj, err := b.client.GetObject(bucketName, objectKey, map[string]string{}, ranges...)
if err != nil {
return nil, err
}
return obj.Body, nil
}
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("BOS_BUCKET"),
Endpoint: os.Getenv("BOS_ENDPOINT"),
AccessKey: os.Getenv("BOS_ACCESS_KEY"),
SecretKey: os.Getenv("BOS_SECRET_KEY"),
}
return c
}
// NewTestBucket creates a test bucket client, creating a temporary bucket before returning.
// The returned close function empties and deletes the bucket.
func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) {
c := configFromEnv()
if err := validateForTest(c); err != nil {
return nil, nil, err
}
if c.Bucket != "" {
if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
return nil, nil, errors.New("BOS_BUCKET is defined. Normally this tests will create temporary bucket " +
"and delete it after test. Unset BOS_BUCKET env variable to use default logic. If you really want to run " +
"tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " +
"needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " +
"to safety (accidentally pointing prod bucket for test) as well as BOS not being fully strong consistent.")
}
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
if err != nil {
return nil, nil, err
}
if err := b.Iter(context.Background(), "", func(f string) error {
return errors.Errorf("bucket %s is not empty", c.Bucket)
}); err != nil {
return nil, nil, errors.Wrapf(err, "checking bucket %s", c.Bucket)
}
t.Log("WARNING. Reusing", c.Bucket, "BOS bucket for BOS tests. Manual cleanup afterwards is required")
return b, func() {}, nil
}
src := rand.NewSource(time.Now().UnixNano())
tmpBucketName := strings.Replace(fmt.Sprintf("test_%x", src.Int63()), "_", "-", -1)
if len(tmpBucketName) >= 31 | {
tmpBucketName = tmpBucketName[:31]
} | conditional_block |
|
bos.go | conf.SecretKey == "" {
return errors.New("insufficient BOS configuration information")
}
return nil
}
// parseConfig unmarshals a buffer into a Config with default HTTPConfig values.
func parseConfig(conf []byte) (Config, error) {
config := Config{}
if err := yaml.Unmarshal(conf, &config); err != nil {
return Config{}, err
}
return config, nil
}
// NewBucket creates a new BOS bucket client from serialized YAML configuration.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
if logger == nil {
logger = log.NewNopLogger()
}
config, err := parseConfig(conf)
if err != nil {
return nil, errors.Wrap(err, "parsing BOS configuration")
}
return NewBucketWithConfig(logger, config, component)
}
// NewBucketWithConfig returns a new Bucket using the provided bos config struct.
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
if err := config.validate(); err != nil {
return nil, errors.Wrap(err, "validating BOS configuration")
}
client, err := bos.NewClient(config.AccessKey, config.SecretKey, config.Endpoint)
if err != nil {
return nil, errors.Wrap(err, "creating BOS client")
}
client.Config.UserAgent = fmt.Sprintf("thanos-%s", component)
bkt := &Bucket{
logger: logger,
client: client,
name: config.Bucket,
}
return bkt, nil
}
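Callers that already hold a Config struct can skip the YAML round-trip that NewBucket performs; a sketch with placeholder credentials:

func newExampleBucket() (*Bucket, error) {
	cfg := Config{
		Bucket:    "my-bucket", // placeholder values, not real credentials
		Endpoint:  "bj.bcebos.com",
		AccessKey: "AK",
		SecretKey: "SK",
	}
	return NewBucketWithConfig(log.NewNopLogger(), cfg, "example")
}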
// Name returns the bucket name for the provider.
func (b *Bucket) Name() string {
return b.name
}
// Delete removes the object with the given name.
func (b *Bucket) Delete(_ context.Context, name string) error {
return b.client.DeleteObject(b.name, name)
}
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error {
size, err := objstore.TryToGetSize(r)
if err != nil {
return errors.Wrapf(err, "getting size of %s", name)
}
partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize
if partNums == 0 {
body, err := bce.NewBodyFromSizedReader(r, lastSlice)
if err != nil {
return errors.Wrapf(err, "failed to create SizedReader for %s", name)
}
if _, err := b.client.PutObject(b.name, name, body, nil); err != nil {
return errors.Wrapf(err, "failed to upload %s", name)
}
return nil
}
result, err := b.client.BasicInitiateMultipartUpload(b.name, name)
if err != nil {
return errors.Wrapf(err, "failed to initiate MultipartUpload for %s", name)
}
uploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) {
body, err := bce.NewBodyFromSizedReader(r, partSize)
if err != nil {
return "", err
}
etag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil)
if err != nil {
if err := b.client.AbortMultipartUpload(b.name, name, uploadId); err != nil {
return etag, err
}
return etag, err
}
return etag, nil
}
var parts []api.UploadInfoType
for part := 1; part <= partNums; part++ {
etag, err := uploadEveryPart(partSize, part, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload part %d for %s", part, name)
}
parts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag})
}
if lastSlice != 0 {
etag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload the last part for %s", name)
}
parts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag})
}
if _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil {
return errors.Wrapf(err, "failed to set %s upload completed", name)
}
return nil
}
// Iter calls f for each entry in the given directory (non-recursive unless the
// Recursive iter option is set). The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt ...objstore.IterOption) error {
if dir != "" {
dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim
}
delimiter := objstore.DirDelim
if objstore.ApplyIterOptions(opt...).Recursive {
delimiter = ""
}
var marker string
for {
if err := ctx.Err(); err != nil {
return err
}
objects, err := b.client.ListObjects(b.name, &api.ListObjectsArgs{
Delimiter: delimiter,
Marker: marker,
MaxKeys: 1000,
Prefix: dir,
})
if err != nil {
return err
}
marker = objects.NextMarker
for _, object := range objects.Contents {
if err := f(object.Key); err != nil {
return err
}
}
for _, object := range objects.CommonPrefixes {
if err := f(object.Prefix); err != nil {
return err
}
}
if !objects.IsTruncated {
break
}
}
return nil
}
// Get returns a reader for the given object name.
func (b *Bucket) | (ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, off, length)
}
// Exists checks if the given object exists in the bucket.
func (b *Bucket) Exists(_ context.Context, name string) (bool, error) {
_, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrapf(err, "getting object metadata of %s", name)
}
return true, nil
}
func (b *Bucket) Close() error {
return nil
}
// ObjectSize returns the size of the specified object.
func (b *Bucket) ObjectSize(_ context.Context, name string) (uint64, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return 0, err
}
return uint64(objMeta.ContentLength), nil
}
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return objstore.ObjectAttributes{}, errors.Wrapf(err, "gettting objectmeta of %s", name)
}
lastModified, err := time.Parse(time.RFC1123, objMeta.LastModified)
if err != nil {
return objstore.ObjectAttributes{}, err
}
return objstore.ObjectAttributes{
Size: objMeta.ContentLength,
LastModified: lastModified,
}, nil
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
switch bosErr := errors.Cause(err).(type) {
case *bce.BceServiceError:
if bosErr.StatusCode == http.StatusNotFound || bosErr.Code == "NoSuchKey" {
return true
}
}
return false
}
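Because IsObjNotFoundErr classifies the provider error, callers can treat a missing object as a soft failure, the same way Exists does above; a sketch:

// readIfPresent returns (nil, nil) when the object does not exist and
// propagates every other error unchanged.
func readIfPresent(ctx context.Context, bkt *Bucket, name string) (io.ReadCloser, error) {
	rc, err := bkt.Get(ctx, name)
	if err != nil {
		if bkt.IsObjNotFoundErr(err) {
			return nil, nil
		}
		return nil, err
	}
	return rc, nil
}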
func (b *Bucket) getRange(_ context.Context, bucketName, objectKey string, off, length int64) (io.ReadCloser, error) {
if len(objectKey) == 0 {
return nil, errors.Errorf("given object name should not empty")
}
ranges := []int64{off}
if length != -1 {
ranges = append(ranges, off+length-1)
}
obj, err := b.client.GetObject(bucketName, objectKey, map[string]string{}, ranges...)
if err != nil {
return nil, err
}
return obj.Body, nil
}
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("BOS_BUCKET"),
Endpoint: os.Getenv("BOS_ENDPOINT"),
AccessKey: os.Getenv("BOS_ACCESS_KEY"),
SecretKey: os.Getenv("BOS_SECRET_KEY"),
}
return c
}
// NewTestBucket creates test bkt client that before returning | Get | identifier_name |
bos.go | ||
conf.SecretKey == "" {
return errors.New("insufficient BOS configuration information")
}
return nil
}
// parseConfig unmarshals a buffer into a Config with default HTTPConfig values.
func parseConfig(conf []byte) (Config, error) {
config := Config{}
if err := yaml.Unmarshal(conf, &config); err != nil {
return Config{}, err
}
return config, nil
}
// NewBucket creates a new BOS bucket client from serialized YAML configuration.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
if logger == nil {
logger = log.NewNopLogger()
}
config, err := parseConfig(conf)
if err != nil {
return nil, errors.Wrap(err, "parsing BOS configuration")
}
return NewBucketWithConfig(logger, config, component)
}
// NewBucketWithConfig returns a new Bucket using the provided bos config struct.
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
if err := config.validate(); err != nil {
return nil, errors.Wrap(err, "validating BOS configuration")
}
client, err := bos.NewClient(config.AccessKey, config.SecretKey, config.Endpoint)
if err != nil {
return nil, errors.Wrap(err, "creating BOS client")
}
client.Config.UserAgent = fmt.Sprintf("thanos-%s", component)
bkt := &Bucket{
logger: logger,
client: client,
name: config.Bucket,
}
return bkt, nil
}
// Name returns the bucket name for the provider.
func (b *Bucket) Name() string {
return b.name
}
// Delete removes the object with the given name.
func (b *Bucket) Delete(_ context.Context, name string) error {
return b.client.DeleteObject(b.name, name)
}
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error {
size, err := objstore.TryToGetSize(r)
if err != nil {
return errors.Wrapf(err, "getting size of %s", name)
}
partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize
if partNums == 0 {
body, err := bce.NewBodyFromSizedReader(r, lastSlice)
if err != nil {
return errors.Wrapf(err, "failed to create SizedReader for %s", name)
}
if _, err := b.client.PutObject(b.name, name, body, nil); err != nil {
return errors.Wrapf(err, "failed to upload %s", name)
}
return nil
}
result, err := b.client.BasicInitiateMultipartUpload(b.name, name)
if err != nil {
return errors.Wrapf(err, "failed to initiate MultipartUpload for %s", name)
}
uploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) {
body, err := bce.NewBodyFromSizedReader(r, partSize)
if err != nil {
return "", err
}
etag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil)
if err != nil {
if err := b.client.AbortMultipartUpload(b.name, name, uploadId); err != nil {
return etag, err
}
return etag, err
}
return etag, nil
}
var parts []api.UploadInfoType
for part := 1; part <= partNums; part++ {
etag, err := uploadEveryPart(partSize, part, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload part %d for %s", part, name)
}
parts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag})
}
if lastSlice != 0 {
etag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload the last part for %s", name)
}
parts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag})
}
if _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil {
return errors.Wrapf(err, "failed to set %s upload completed", name)
}
return nil
}
// Iter calls f for each entry in the given directory (non-recursive unless the
// Recursive iter option is set). The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt ...objstore.IterOption) error {
if dir != "" {
dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim
}
delimiter := objstore.DirDelim
if objstore.ApplyIterOptions(opt...).Recursive {
delimiter = ""
}
var marker string
for {
if err := ctx.Err(); err != nil {
return err
}
objects, err := b.client.ListObjects(b.name, &api.ListObjectsArgs{
Delimiter: delimiter,
Marker: marker,
MaxKeys: 1000,
Prefix: dir,
})
if err != nil {
return err
}
marker = objects.NextMarker
for _, object := range objects.Contents {
if err := f(object.Key); err != nil {
return err
}
}
for _, object := range objects.CommonPrefixes {
if err := f(object.Prefix); err != nil {
return err
}
}
if !objects.IsTruncated {
break
}
}
return nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, off, length)
}
| _, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrapf(err, "getting object metadata of %s", name)
}
return true, nil
}
func (b *Bucket) Close() error {
return nil
}
// ObjectSize returns the size of the specified object.
func (b *Bucket) ObjectSize(_ context.Context, name string) (uint64, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return 0, err
}
return uint64(objMeta.ContentLength), nil
}
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return objstore.ObjectAttributes{}, errors.Wrapf(err, "getting object metadata of %s", name)
}
lastModified, err := time.Parse(time.RFC1123, objMeta.LastModified)
if err != nil {
return objstore.ObjectAttributes{}, err
}
return objstore.ObjectAttributes{
Size: objMeta.ContentLength,
LastModified: lastModified,
}, nil
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
switch bosErr := errors.Cause(err).(type) {
case *bce.BceServiceError:
if bosErr.StatusCode == http.StatusNotFound || bosErr.Code == "NoSuchKey" {
return true
}
}
return false
}
func (b *Bucket) getRange(_ context.Context, bucketName, objectKey string, off, length int64) (io.ReadCloser, error) {
if len(objectKey) == 0 {
return nil, errors.Errorf("given object name should not empty")
}
ranges := []int64{off}
if length != -1 {
ranges = append(ranges, off+length-1)
}
obj, err := b.client.GetObject(bucketName, objectKey, map[string]string{}, ranges...)
if err != nil {
return nil, err
}
return obj.Body, nil
}
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("BOS_BUCKET"),
Endpoint: os.Getenv("BOS_ENDPOINT"),
AccessKey: os.Getenv("BOS_ACCESS_KEY"),
SecretKey: os.Getenv("BOS_SECRET_KEY"),
}
return c
}
// NewTestBucket creates test bkt client that before returning | // Exists checks if the given object exists in the bucket.
func (b *Bucket) Exists(_ context.Context, name string) (bool, error) { | random_line_split |
bos.go | conf.SecretKey == "" {
return errors.New("insufficient BOS configuration information")
}
return nil
}
// parseConfig unmarshals a buffer into a Config with default HTTPConfig values.
func parseConfig(conf []byte) (Config, error) {
config := Config{}
if err := yaml.Unmarshal(conf, &config); err != nil {
return Config{}, err
}
return config, nil
}
// NewBucket creates a new BOS bucket client from serialized YAML configuration.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
if logger == nil {
logger = log.NewNopLogger()
}
config, err := parseConfig(conf)
if err != nil {
return nil, errors.Wrap(err, "parsing BOS configuration")
}
return NewBucketWithConfig(logger, config, component)
}
// NewBucketWithConfig returns a new Bucket using the provided bos config struct.
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
if err := config.validate(); err != nil {
return nil, errors.Wrap(err, "validating BOS configuration")
}
client, err := bos.NewClient(config.AccessKey, config.SecretKey, config.Endpoint)
if err != nil {
return nil, errors.Wrap(err, "creating BOS client")
}
client.Config.UserAgent = fmt.Sprintf("thanos-%s", component)
bkt := &Bucket{
logger: logger,
client: client,
name: config.Bucket,
}
return bkt, nil
}
// Name returns the bucket name for the provider.
func (b *Bucket) Name() string |
// Delete removes the object with the given name.
func (b *Bucket) Delete(_ context.Context, name string) error {
return b.client.DeleteObject(b.name, name)
}
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error {
size, err := objstore.TryToGetSize(r)
if err != nil {
return errors.Wrapf(err, "getting size of %s", name)
}
partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize
if partNums == 0 {
body, err := bce.NewBodyFromSizedReader(r, lastSlice)
if err != nil {
return errors.Wrapf(err, "failed to create SizedReader for %s", name)
}
if _, err := b.client.PutObject(b.name, name, body, nil); err != nil {
return errors.Wrapf(err, "failed to upload %s", name)
}
return nil
}
result, err := b.client.BasicInitiateMultipartUpload(b.name, name)
if err != nil {
return errors.Wrapf(err, "failed to initiate MultipartUpload for %s", name)
}
uploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) {
body, err := bce.NewBodyFromSizedReader(r, partSize)
if err != nil {
return "", err
}
etag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil)
if err != nil {
if err := b.client.AbortMultipartUpload(b.name, name, uploadId); err != nil {
return etag, err
}
return etag, err
}
return etag, nil
}
var parts []api.UploadInfoType
for part := 1; part <= partNums; part++ {
etag, err := uploadEveryPart(partSize, part, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload part %d for %s", part, name)
}
parts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag})
}
if lastSlice != 0 {
etag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId)
if err != nil {
return errors.Wrapf(err, "failed to upload the last part for %s", name)
}
parts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag})
}
if _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil {
return errors.Wrapf(err, "failed to set %s upload completed", name)
}
return nil
}
// Iter calls f for each entry in the given directory (non-recursive unless the
// Recursive iter option is set). The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt ...objstore.IterOption) error {
if dir != "" {
dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim
}
delimiter := objstore.DirDelim
if objstore.ApplyIterOptions(opt...).Recursive {
delimiter = ""
}
var marker string
for {
if err := ctx.Err(); err != nil {
return err
}
objects, err := b.client.ListObjects(b.name, &api.ListObjectsArgs{
Delimiter: delimiter,
Marker: marker,
MaxKeys: 1000,
Prefix: dir,
})
if err != nil {
return err
}
marker = objects.NextMarker
for _, object := range objects.Contents {
if err := f(object.Key); err != nil {
return err
}
}
for _, object := range objects.CommonPrefixes {
if err := f(object.Prefix); err != nil {
return err
}
}
if !objects.IsTruncated {
break
}
}
return nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, b.name, name, off, length)
}
// Exists checks if the given object exists in the bucket.
func (b *Bucket) Exists(_ context.Context, name string) (bool, error) {
_, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrapf(err, "getting object metadata of %s", name)
}
return true, nil
}
func (b *Bucket) Close() error {
return nil
}
// ObjectSize returns the size of the specified object.
func (b *Bucket) ObjectSize(_ context.Context, name string) (uint64, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return 0, err
}
return uint64(objMeta.ContentLength), nil
}
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) {
objMeta, err := b.client.GetObjectMeta(b.name, name)
if err != nil {
return objstore.ObjectAttributes{}, errors.Wrapf(err, "getting object metadata of %s", name)
}
lastModified, err := time.Parse(time.RFC1123, objMeta.LastModified)
if err != nil {
return objstore.ObjectAttributes{}, err
}
return objstore.ObjectAttributes{
Size: objMeta.ContentLength,
LastModified: lastModified,
}, nil
}
// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
switch bosErr := errors.Cause(err).(type) {
case *bce.BceServiceError:
if bosErr.StatusCode == http.StatusNotFound || bosErr.Code == "NoSuchKey" {
return true
}
}
return false
}
func (b *Bucket) getRange(_ context.Context, bucketName, objectKey string, off, length int64) (io.ReadCloser, error) {
if len(objectKey) == 0 {
return nil, errors.Errorf("given object name should not empty")
}
ranges := []int64{off}
if length != -1 {
ranges = append(ranges, off+length-1)
}
obj, err := b.client.GetObject(bucketName, objectKey, map[string]string{}, ranges...)
if err != nil {
return nil, err
}
return obj.Body, nil
}
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("BOS_BUCKET"),
Endpoint: os.Getenv("BOS_ENDPOINT"),
AccessKey: os.Getenv("BOS_ACCESS_KEY"),
SecretKey: os.Getenv("BOS_SECRET_KEY"),
}
return c
}
// NewTestBucket creates test bkt client that before | {
return b.name
} | identifier_body |
remote.py | _is_error(ex, 429):
raise Gmail.UserRateException(ex)
elif ex_is_error(ex, 500):
raise Gmail.GenericException(ex)
else:
raise Gmail.BatchException(ex)
responses.append(resp)
http = creds.authorize(Http(timeout=30))
service = build('gmail', 'v1', http=http)
batch = service.new_batch_http_request()
responses = []
for gid, cmd in cmds:
batch.add(cmd, callback=lambda a, b, c: handler(a, b, c,
responses),
request_id=gid)
batch.execute(http=http)
return responses
@staticmethod
def worker(my_idx, inq, outq):
"""Entry point for new executor threads.
Downloading (or importing) metadata is limited by the round-trip time to
Gmail if we only use one thread. This wrapper function makes it
possible to start multiple threads (currently limited to two because
that is how many concurrent requests Gmail allows from the same user) to
reduce the import time.
Commands come in via a thread-safe queue (inq) and response data is
written to another thread-safe queue (outq). This function does not
interpret the data in either queue. It merely acts as a dumb pipeline
between the two endpoints.
:param inq queue.Queue: Ingress queue. Commands received on this
queue are sent to a batch_executor.
:param outq queue.Queue: Egress queue. Data returned by the batch
executor is written to the queue for consumption by the
initiator.
"""
print("worker %d: starting" % my_idx)
backoff = .001
while True:
cmd = inq.get()
if cmd is None:
break
ridx, creds, cmds = cmd
backoff = max(backoff / 2, 0.001)
while True:
try:
responses = Gmail.batch_executor(creds, cmds)
except Gmail.UserRateException:
print(f'worker {my_idx}: backoff {backoff} sec')
sleep(backoff)
backoff = min(backoff * 2, 1.0)
except Exception as ex:
outq.put([ridx, ex])
break
else:
outq.put([ridx, responses])
break
inq.task_done()
print("worker %d stoping" % my_idx)
def __init__(self, **kwargs):
"""Initialize a new object using the options passed in."""
self.opts = self.options.push(kwargs)
data_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'../data'))
if self.opts.credentials_path is None:
self.opts.set(credentials_path='%s-creds.json' % self.opts.email)
if os.path.relpath(self.opts.client_secret_file):
self.opts.set(client_secret_file=os.path.join(data_dir,
self.opts.client_secret_file))
if os.path.relpath(self.opts.credentials_path):
self.opts.set(credentials_path=os.path.join(data_dir,
self.opts.credentials_path))
self.creds = None
self.service = None
self.threads = []
if self.opts.num_workers >= 1:
self.outq = queue.Queue(maxsize=self.opts.num_workers + 1)
self.inq = queue.Queue(maxsize=self.opts.num_workers + 1)
for idx in range(self.opts.num_workers):
werker = lambda: self.worker(idx, self.outq, self.inq)
# It's OK for these threads to not free up resources on exit
# since they don't store permanent state.
# FIXME: should I even keep a pointer to the thread?
self.threads.append(threading.Thread(daemon=True,
target=werker))
self.threads[idx].start()
@property
def poll_interval(self):
|
@property
def scopes(self):
"""Scopes used for authorization."""
return [scope.rsplit('/', 1)[1] for scope in self.opts.scopes]
@property
def writable(self):
"""Whether the account was authorized as read-only or not."""
return 'gmail.modify' in self.scopes
@property
def can_send(self):
"""Whether the scopes list includes the ability to send mail."""
return ('gmail.compose' in self.scopes or
'gmail.send' in self.scopes)
def get_credentials(self):
"Read, or create one if it does not exist, the credentials file."
store = file.Storage(self.opts.credentials_path)
creds = store.get()
if not creds or creds.invalid:
# Clear out argv so argparse in run_flow() is happy.
argv = sys.argv
sys.argv = []
flow = client.flow_from_clientsecrets(self.opts.client_secret_file,
self.opts.scopes)
creds = tools.run_flow(flow, store)
sys.argv = argv
return creds
def reachable(self):
"""Whether the Gmail endpoint is reachable."""
service = build('gmail', 'v1', http=Http(timeout=1.0))
url = urlparse.urlparse(service._baseUrl)
host = url.hostname
port = url.port
try:
socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)
except (socket.herror, socket.gaierror, URLError, OSError):
return False
return True
def authorize(self):
"Authorize the service to access the user's mailbox."
if not self.service:
self.creds = self.get_credentials()
http = self.creds.authorize(Http(timeout=10.0))
self.service = build('gmail', 'v1', http=http)
assert self.service is not None
def authorized(func):
"Ensure service is authorized to access the user's mailbox."
def func_wrap(self, *args, **kwargs):
if self.service is None:
self.authorize()
return func(self, *args, **kwargs)
return func_wrap
@authorized
def get_profile(self):
"Return the user's profile."
# Call the Gmail API
results = self.service.users().getProfile(userId='me').execute()
return results
@authorized
def get_labels(self):
"Return a list of labels."
# Call the Gmail API
results = self.service.users().labels().list(userId='me').execute()
return results.get('labels', [])
@authorized
def get_history_id(self, start=1):
"Get the current history id of the mailbox."
try:
hist = self.service.users().history()
results = hist.list(userId='me', startHistoryId=start).execute()
if 'historyId' in results:
return int(results['historyId'])
else:
raise Gmail.GenericException("no historyId field returned")
except googleapiclient.errors.HttpError:
# this happens if the original historyId is too old,
# try to get last message and the historyId from it.
for mset in self.list_messages(1):
(_, mset) = mset
msg = self.get_message(mset[0]['id'])
return int(msg['historyId'])
@authorized
def get_history_since(self, start=0):
"""Get a list of changes since the given start point (a history id)."""
hist = self.service.users().history()
try:
results = hist.list(userId='me', startHistoryId=start).execute()
if 'history' in results:
yield results['history']
while 'nextPageToken' in results:
results = hist.list(userId='me',
pageToken=results['nextPageToken'],
startHistoryId=start).execute()
if 'history' in results:
yield results['history']
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 404:
raise Gmail.NoHistoryException
elif ex.resp.status == 403:
raise Gmail.UserRateException(ex)
else:
raise Gmail.GenericException(ex)
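A sketch of consuming those pages, assuming an authorized instance gm and a previously saved history id checkpoint (both assumed names); messagesAdded is one of the record types the Gmail history API returns:

# Walk every history page since the checkpoint and collect the ids of
# messages that were added.
changed = set()
for page in gm.get_history_since(checkpoint):
    for record in page:
        for added in record.get('messagesAdded', []):
            changed.add(added['message']['id'])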
@authorized
def list_messages(self, limit=1, query=None):
"Returns a list of messages (max = limit)."
total = 0
token = None
results = []
if query is None:
query = self.opts.query
while True:
results = self.service.users().messages().list(userId='me',
pageToken=token,
q=query,
maxResults=limit,
includeSpamTrash=True).\
execute()
if 'messages' in results:
total += results['resultSizeEstimate']
yield results['resultSizeEstimate'], results['messages']
if 'nextPageToken' in results:
token = results['nextPageToken']
else:
break
if limit is not None and total >= limit:
break
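Since the generator yields (estimate, batch) pairs one page at a time, callers can stream ids without holding the whole mailbox in memory; a sketch with an assumed authorized instance gm (limit=None follows every page, per the limit check above):

# Count message ids matching a query, page by page.
count = 0
for _estimate, batch in gm.list_messages(limit=None, query='in:inbox'):
    count += len(batch)
print('saw %d message ids' % count)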
@authorized
def get_message(self, id, format='minimal'):
"""Get the message in the given format."""
try:
return self.service.users().messages().get(userId='me',
id=id,
format=format).\
execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 403 or ex.resp.status == 500:
return self.get_message(id, format)
else:
raise ex
@authorized
| """How often to poll for new messages / updates."""
return self.opts.poll_interval | identifier_body |
remote.py | _is_error(ex, 429):
raise Gmail.UserRateException(ex)
elif ex_is_error(ex, 500):
raise Gmail.GenericException(ex)
else:
raise Gmail.BatchException(ex)
responses.append(resp)
http = creds.authorize(Http(timeout=30))
service = build('gmail', 'v1', http=http)
batch = service.new_batch_http_request()
responses = []
for gid, cmd in cmds:
batch.add(cmd, callback=lambda a, b, c: handler(a, b, c,
responses),
request_id=gid)
batch.execute(http=http)
return responses
@staticmethod
def worker(my_idx, inq, outq):
"""Entry point for new executor threads.
Downloading (or importing) metadata is limited by the round-trip time to
Gmail if we only use one thread. This wrapper function makes it
possible to start multiple threads (currently limited to two because
that is how many concurrent requests Gmail allows from the same user) to
reduce the import time.
Commands come in via a thread-safe queue (inq) and response data is
written to another thread-safe queue (outq). This function does not
interpret the data in either queue. It merely acts as a dumb pipeline
between the two endpoints.
:param inq queue.Queue: Ingress queue. Commands received on this
queue are sent to a batch_executor.
:param outq queue.Queue: Egress queue. Data returned by the batch
executor is written to the queue for consumption by the
initiator.
"""
print("worker %d: starting" % my_idx)
backoff = .001
while True:
cmd = inq.get()
if cmd is None:
break
ridx, creds, cmds = cmd
backoff = max(backoff / 2, 0.001)
while True:
try:
responses = Gmail.batch_executor(creds, cmds)
except Gmail.UserRateException:
print(f'worker {my_idx}: backoff {backoff} sec')
sleep(backoff)
backoff = min(backoff * 2, 1.0)
except Exception as ex: | else:
outq.put([ridx, responses])
break
inq.task_done()
print("worker %d stoping" % my_idx)
def __init__(self, **kwargs):
"""Initialize a new object using the options passed in."""
self.opts = self.options.push(kwargs)
data_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'../data'))
if self.opts.credentials_path is None:
self.opts.set(credentials_path='%s-creds.json' % self.opts.email)
if os.path.relpath(self.opts.client_secret_file):
self.opts.set(client_secret_file=os.path.join(data_dir,
self.opts.client_secret_file))
if os.path.relpath(self.opts.credentials_path):
self.opts.set(credentials_path=os.path.join(data_dir,
self.opts.credentials_path))
self.creds = None
self.service = None
self.threads = []
if self.opts.num_workers >= 1:
self.outq = queue.Queue(maxsize=self.opts.num_workers + 1)
self.inq = queue.Queue(maxsize=self.opts.num_workers + 1)
for idx in range(self.opts.num_workers):
werker = lambda: self.worker(idx, self.outq, self.inq)
# It's OK for these threads to not free up resources on exit
# since they don't store permanent state.
# FIXME: should I even keep a pointer to the thread?
self.threads.append(threading.Thread(daemon=True,
target=werker))
self.threads[idx].start()
@property
def poll_interval(self):
"""How often to poll for new messages / updates."""
return self.opts.poll_interval
@property
def scopes(self):
"""Scopes used for authorization."""
return [scope.rsplit('/', 1)[1] for scope in self.opts.scopes]
@property
def writable(self):
"""Whether the account was authorized as read-only or not."""
return 'gmail.modify' in self.scopes
@property
def can_send(self):
"""Whether the scopes list includes the ability to send mail."""
return ('gmail.compose' in self.scopes or
'gmail.send' in self.scopes)
def get_credentials(self):
"Read, or create one if it does not exist, the credentials file."
store = file.Storage(self.opts.credentials_path)
creds = store.get()
if not creds or creds.invalid:
# Clear out argv so argparse in run_flow() is happy.
argv = sys.argv
sys.argv = []
flow = client.flow_from_clientsecrets(self.opts.client_secret_file,
self.opts.scopes)
creds = tools.run_flow(flow, store)
sys.argv = argv
return creds
def reachable(self):
"""Whether the Gmail endpoint is reachable."""
service = build('gmail', 'v1', http=Http(timeout=1.0))
url = urlparse.urlparse(service._baseUrl)
host = url.hostname
port = url.port
try:
socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)
except (socket.herror, socket.gaierror, URLError, OSError):
return False
return True
def authorize(self):
"Authorize the service to access the user's mailbox."
if not self.service:
self.creds = self.get_credentials()
http = self.creds.authorize(Http(timeout=10.0))
self.service = build('gmail', 'v1', http=http)
assert self.service is not None
def authorized(func):
"Ensure service is authorized to access the user's mailbox."
def func_wrap(self, *args, **kwargs):
if self.service is None:
self.authorize()
return func(self, *args, **kwargs)
return func_wrap
@authorized
def get_profile(self):
"Return the user's profile."
# Call the Gmail API
results = self.service.users().getProfile(userId='me').execute()
return results
@authorized
def get_labels(self):
"Return a list of labels."
# Call the Gmail API
results = self.service.users().labels().list(userId='me').execute()
return results.get('labels', [])
@authorized
def get_history_id(self, start=1):
"Get the current history id of the mailbox."
try:
hist = self.service.users().history()
results = hist.list(userId='me', startHistoryId=start).execute()
if 'historyId' in results:
return int(results['historyId'])
else:
raise Gmail.GenericException("no historyId field returned")
except googleapiclient.errors.HttpError:
# this happens if the original historyId is too old,
# try to get last message and the historyId from it.
for mset in self.list_messages(1):
(_, mset) = mset
msg = self.get_message(mset[0]['id'])
return int(msg['historyId'])
@authorized
def get_history_since(self, start=0):
"""Get a list of changes since the given start point (a history id)."""
hist = self.service.users().history()
try:
results = hist.list(userId='me', startHistoryId=start).execute()
if 'history' in results:
yield results['history']
while 'nextPageToken' in results:
results = hist.list(userId='me',
pageToken=results['nextPageToken'],
startHistoryId=start).execute()
if 'history' in results:
yield results['history']
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 404:
raise Gmail.NoHistoryException
elif ex.resp.status == 403:
raise Gmail.UserRateException(ex)
else:
raise Gmail.GenericException(ex)
@authorized
def list_messages(self, limit=1, query=None):
"Returns a list of messages (max = limit)."
total = 0
token = None
results = []
if query is None:
query = self.opts.query
while True:
results = self.service.users().messages().list(userId='me',
pageToken=token,
q=query,
maxResults=limit,
includeSpamTrash=True).\
execute()
if 'messages' in results:
total += results['resultSizeEstimate']
yield results['resultSizeEstimate'], results['messages']
if 'nextPageToken' in results:
token = results['nextPageToken']
else:
break
if limit is not None and total >= limit:
break
@authorized
def get_message(self, id, format='minimal'):
"""Get the message in the given format."""
try:
return self.service.users().messages().get(userId='me',
id=id,
format=format).\
execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 403 or ex.resp.status == 500:
return self.get_message(id, format)
else:
raise ex
@authorized
def | outq.put([ridx, ex])
break | random_line_split |
remote.py | _is_error(ex, 429):
raise Gmail.UserRateException(ex)
elif ex_is_error(ex, 500):
raise Gmail.GenericException(ex)
else:
raise Gmail.BatchException(ex)
responses.append(resp)
http = creds.authorize(Http(timeout=30))
service = build('gmail', 'v1', http=http)
batch = service.new_batch_http_request()
responses = []
for gid, cmd in cmds:
|
batch.execute(http=http)
return responses
@staticmethod
def worker(my_idx, inq, outq):
"""Entry point for new executor threads.
Downloading (or importing) metadata is limited by the round-trip time to
Gmail if we only use one thread. This wrapper function makes it
possible to start multiple threads (currently limited to two because
that is how many concurrent requests Gmail allows from the same user) to
reduce the import time.
Commands come in via a thread-safe queue (inq) and response data is
written to another thread-safe queue (outq). This function does not
interpret the data in either queue. It merely acts as a dumb pipeline
between the two endpoints.
:param inq queue.Queue: Ingress queue. Commands received on this
queue are sent to a batch_executor.
:param outq queue.Queue: Egress queue. Data returned by the batch
executor is written to the queue for consumption by the
initiator.
"""
print("worker %d: starting" % my_idx)
backoff = .001
while True:
cmd = inq.get()
if cmd is None:
break
ridx, creds, cmds = cmd
backoff = max(backoff / 2, 0.001)
while True:
try:
responses = Gmail.batch_executor(creds, cmds)
except Gmail.UserRateException:
print(f'worker {my_idx}: backoff {backoff} sec')
sleep(backoff)
backoff = min(backoff * 2, 1.0)
except Exception as ex:
outq.put([ridx, ex])
break
else:
outq.put([ridx, responses])
break
inq.task_done()
print("worker %d stoping" % my_idx)
def __init__(self, **kwargs):
"""Initialize a new object using the options passed in."""
self.opts = self.options.push(kwargs)
data_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'../data'))
if self.opts.credentials_path is None:
self.opts.set(credentials_path='%s-creds.json' % self.opts.email)
if os.path.relpath(self.opts.client_secret_file):
self.opts.set(client_secret_file=os.path.join(data_dir,
self.opts.client_secret_file))
if os.path.relpath(self.opts.credentials_path):
self.opts.set(credentials_path=os.path.join(data_dir,
self.opts.credentials_path))
self.creds = None
self.service = None
self.threads = []
if self.opts.num_workers >= 1:
self.outq = queue.Queue(maxsize=self.opts.num_workers + 1)
self.inq = queue.Queue(maxsize=self.opts.num_workers + 1)
for idx in range(self.opts.num_workers):
werker = lambda: self.worker(idx, self.outq, self.inq)
# It's OK for these threads to not free up resources on exit
# since they don't store permanent state.
# FIXME: should I even keep a pointer to the thread?
self.threads.append(threading.Thread(daemon=True,
target=werker))
self.threads[idx].start()
@property
def poll_interval(self):
"""How often to poll for new messages / updates."""
return self.opts.poll_interval
@property
def scopes(self):
"""Scopes used for authorization."""
return [scope.rsplit('/', 1)[1] for scope in self.opts.scopes]
@property
def writable(self):
"""Whether the account was authorized as read-only or not."""
return 'gmail.modify' in self.scopes
@property
def can_send(self):
"""Whether the scopes list includes the ability to send mail."""
return ('gmail.compose' in self.scopes or
'gmail.send' in self.scopes)
def get_credentials(self):
"Read, or create one if it does not exist, the credentials file."
store = file.Storage(self.opts.credentials_path)
creds = store.get()
if not creds or creds.invalid:
# Clear out argv so argparse in run_flow() is happy.
argv = sys.argv
sys.argv = []
flow = client.flow_from_clientsecrets(self.opts.client_secret_file,
self.opts.scopes)
creds = tools.run_flow(flow, store)
sys.argv = argv
return creds
def reachable(self):
"""Whether the Gmail endpoint is reachable."""
service = build('gmail', 'v1', http=Http(timeout=1.0))
url = urlparse.urlparse(service._baseUrl)
host = url.hostname
port = url.port
try:
socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)
except (socket.herror, socket.gaierror, URLError, OSError):
return False
return True
def authorize(self):
"Authorize the service to access the user's mailbox."
if not self.service:
self.creds = self.get_credentials()
http = self.creds.authorize(Http(timeout=10.0))
self.service = build('gmail', 'v1', http=http)
assert self.service is not None
def authorized(func):
"Ensure service is authorized to access the user's mailbox."
def func_wrap(self, *args, **kwargs):
if self.service is None:
self.authorize()
return func(self, *args, **kwargs)
return func_wrap
@authorized
def get_profile(self):
"Return the user's profile."
# Call the Gmail API
results = self.service.users().getProfile(userId='me').execute()
return results
@authorized
def get_labels(self):
"Return a list of labels."
# Call the Gmail API
results = self.service.users().labels().list(userId='me').execute()
return results.get('labels', [])
@authorized
def get_history_id(self, start=1):
"Get the current history id of the mailbox."
try:
hist = self.service.users().history()
results = hist.list(userId='me', startHistoryId=start).execute()
if 'historyId' in results:
return int(results['historyId'])
else:
raise Gmail.GenericException("no historyId field returned")
except googleapiclient.errors.HttpError:
# this happens if the original historyId is too old,
# try to get last message and the historyId from it.
for mset in self.list_messages(1):
(_, mset) = mset
msg = self.get_message(mset[0]['id'])
return int(msg['historyId'])
@authorized
def get_history_since(self, start=0):
"""Get a list of changes since the given start point (a history id)."""
hist = self.service.users().history()
try:
results = hist.list(userId='me', startHistoryId=start).execute()
if 'history' in results:
yield results['history']
while 'nextPageToken' in results:
results = hist.list(userId='me',
pageToken=results['nextPageToken'],
startHistoryId=start).execute()
if 'history' in results:
yield results['history']
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 404:
raise Gmail.NoHistoryException
elif ex.resp.status == 403:
raise Gmail.UserRateException(ex)
else:
raise Gmail.GenericException(ex)
@authorized
def list_messages(self, limit=1, query=None):
"Returns a list of messages (max = limit)."
total = 0
token = None
results = []
if query is None:
query = self.opts.query
while True:
results = self.service.users().messages().list(userId='me',
pageToken=token,
q=query,
maxResults=limit,
includeSpamTrash=True).\
execute()
if 'messages' in results:
total += results['resultSizeEstimate']
yield results['resultSizeEstimate'], results['messages']
if 'nextPageToken' in results:
token = results['nextPageToken']
else:
break
if limit is not None and total >= limit:
break
@authorized
def get_message(self, id, format='minimal'):
"""Get the message in the given format."""
try:
return self.service.users().messages().get(userId='me',
id=id,
format=format).\
execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 403 or ex.resp.status == 500:
return self.get_message(id, format)
else:
raise ex
@authorized
| batch.add(cmd, callback=lambda a, b, c: handler(a, b, c,
responses),
request_id=gid) | conditional_block |
remote.py | _is_error(ex, 429):
raise Gmail.UserRateException(ex)
elif ex_is_error(ex, 500):
raise Gmail.GenericException(ex)
else:
raise Gmail.BatchException(ex)
responses.append(resp)
http = creds.authorize(Http(timeout=30))
service = build('gmail', 'v1', http=http)
batch = service.new_batch_http_request()
responses = []
for gid, cmd in cmds:
batch.add(cmd, callback=lambda a, b, c: handler(a, b, c,
responses),
request_id=gid)
batch.execute(http=http)
return responses
@staticmethod
def worker(my_idx, inq, outq):
"""Entry point for new executor threads.
Downloading (or importing) metadata is limited by the round-trip time to
Gmail if we only use one thread. This wrapper function makes it
possible to start multiple threads (currently limited to two because
that is how many concurrent requests Gmail allows from the same user) to
reduce the import time.
Commands come in via a thread-safe queue (inq) and response data is
written to another thread-safe queue (outq). This function does not
interpret the data in either queue. It merely acts as a dumb pipeline
between the two endpoints.
:param inq queue.Queue: Ingress queue. Commands received on this
queue are sent to a batch_executor.
:param outq queue.Queue: Egress queue. Data returned by the batch
executor is written to the queue for consumption by the
initiator.
"""
print("worker %d: starting" % my_idx)
backoff = .001
while True:
cmd = inq.get()
if cmd is None:
break
ridx, creds, cmds = cmd
backoff = max(backoff / 2, 0.001)
while True:
try:
responses = Gmail.batch_executor(creds, cmds)
except Gmail.UserRateException:
print(f'worker {my_idx}: backoff {backoff} sec')
sleep(backoff)
backoff = min(backoff * 2, 1.0)
except Exception as ex:
outq.put([ridx, ex])
break
else:
outq.put([ridx, responses])
break
inq.task_done()
print("worker %d stoping" % my_idx)
def __init__(self, **kwargs):
"""Initialize a new object using the options passed in."""
self.opts = self.options.push(kwargs)
data_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'../data'))
if self.opts.credentials_path is None:
self.opts.set(credentials_path='%s-creds.json' % self.opts.email)
if os.path.relpath(self.opts.client_secret_file):
self.opts.set(client_secret_file=os.path.join(data_dir,
self.opts.client_secret_file))
if os.path.relpath(self.opts.credentials_path):
self.opts.set(credentials_path=os.path.join(data_dir,
self.opts.credentials_path))
self.creds = None
self.service = None
self.threads = []
if self.opts.num_workers >= 1:
self.outq = queue.Queue(maxsize=self.opts.num_workers + 1)
self.inq = queue.Queue(maxsize=self.opts.num_workers + 1)
for idx in range(self.opts.num_workers):
werker = lambda: self.worker(idx, self.outq, self.inq)
# It's OK for these threads to not free up resources on exit
# since they don't store permanent state.
# FIXME: should I even keep a pointer to the thread?
self.threads.append(threading.Thread(daemon=True,
target=werker))
self.threads[idx].start()
@property
def poll_interval(self):
"""How often to poll for new messages / updates."""
return self.opts.poll_interval
@property
def scopes(self):
"""Scopes used for authorization."""
return [scope.rsplit('/', 1)[1] for scope in self.opts.scopes]
@property
def writable(self):
"""Whether the account was authorized as read-only or not."""
return 'gmail.modify' in self.scopes
@property
def can_send(self):
"""Whether the scopes list includes the ability to send mail."""
return ('gmail.compose' in self.scopes or
'gmail.send' in self.scopes)
def get_credentials(self):
"Read, or create one if it does not exist, the credentials file."
store = file.Storage(self.opts.credentials_path)
creds = store.get()
if not creds or creds.invalid:
# Clear out argv so argparse in run_flow() is happy.
argv = sys.argv
sys.argv = []
flow = client.flow_from_clientsecrets(self.opts.client_secret_file,
self.opts.scopes)
creds = tools.run_flow(flow, store)
sys.argv = argv
return creds
def reachable(self):
"""Whether the Gmail endpoint is reachable."""
service = build('gmail', 'v1', http=Http(timeout=1.0))
url = urlparse.urlparse(service._baseUrl)
host = url.hostname
port = url.port
try:
socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)
except (socket.herror, socket.gaierror, URLError, OSError):
return False
return True
def authorize(self):
"Authorize the service to access the user's mailbox."
if not self.service:
self.creds = self.get_credentials()
http = self.creds.authorize(Http(timeout=10.0))
self.service = build('gmail', 'v1', http=http)
assert self.service is not None
def authorized(func):
"Ensure service is authorized to access the user's mailbox."
def func_wrap(self, *args, **kwargs):
if self.service is None:
self.authorize()
return func(self, *args, **kwargs)
return func_wrap
@authorized
def get_profile(self):
"Return the user's profile."
# Call the Gmail API
results = self.service.users().getProfile(userId='me').execute()
return results
@authorized
def get_labels(self):
"Return a list of labels."
# Call the Gmail API
results = self.service.users().labels().list(userId='me').execute()
return results.get('labels', [])
@authorized
def get_history_id(self, start=1):
"Get the current history id of the mailbox."
try:
hist = self.service.users().history()
results = hist.list(userId='me', startHistoryId=start).execute()
if 'historyId' in results:
return int(results['historyId'])
else:
raise Gmail.GenericException("no historyId field returned")
except googleapiclient.errors.HttpError:
# this happens if the original historyId is too old,
# try to get last message and the historyId from it.
for mset in self.list_messages(1):
(_, mset) = mset
msg = self.get_message(mset[0]['id'])
return int(msg['historyId'])
@authorized
def | (self, start=0):
"""Get a list of changes since the given start point (a history id)."""
hist = self.service.users().history()
try:
results = hist.list(userId='me', startHistoryId=start).execute()
if 'history' in results:
yield results['history']
while 'nextPageToken' in results:
results = hist.list(userId='me',
pageToken=results['nextPageToken'],
startHistoryId=start).execute()
if 'history' in results:
yield results['history']
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 404:
raise Gmail.NoHistoryException
elif ex.resp.status == 403:
raise Gmail.UserRateException(ex)
else:
raise Gmail.GenericException(ex)
@authorized
def list_messages(self, limit=1, query=None):
"Returns a list of messages (max = limit)."
total = 0
token = None
results = []
if query is None:
query = self.opts.query
while True:
results = self.service.users().messages().list(userId='me',
pageToken=token,
q=query,
maxResults=limit,
includeSpamTrash=True).\
execute()
if 'messages' in results:
total += results['resultSizeEstimate']
yield results['resultSizeEstimate'], results['messages']
if 'nextPageToken' in results:
token = results['nextPageToken']
else:
break
if limit is not None and total >= limit:
break
@authorized
def get_message(self, id, format='minimal'):
"""Get the message in the given format."""
try:
return self.service.users().messages().get(userId='me',
id=id,
format=format).\
execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status == 403 or ex.resp.status == 500:
return self.get_message(id, format)
else:
raise ex
@authorized
| get_history_since | identifier_name |