patch (string, length 17-31.2k) | y (int64, 1-1) | oldf (string, length 0-2.21M) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, length 8-843) | proj (string, 212 classes) | lang (string, 9 classes)
---|---|---|---|---|---|---|---|
@@ -683,5 +683,17 @@ describe('model', function() {
yield User.collection.drop();
});
});
+ it('should do a dryRun feat-10316', function() {
+ return co(function*() {
+ const userSchema = new mongoose.Schema({ username: String }, { password: String }, { email: String });
+ userSchema.index({ password: 1 });
+ userSchema.index({ email: 1 });
+ const User = db.model('Upson', userSchema);
+ yield User.collection.createIndex({ age: 1 });
+ const result = yield User.diffIndexes();
+ assert.deepStrictEqual(result.toDrop, ['age_1']);
+ assert.deepStrictEqual(result.toCreate, [{ password: 1 }, { email: 1 }]);
+ });
+ });
});
}); | 1 | 'use strict';
/**
* Test dependencies.
*/
const start = require('./common');
const assert = require('assert');
const co = require('co');
const random = require('../lib/utils').random;
const mongoose = start.mongoose;
const Schema = mongoose.Schema;
const ObjectId = Schema.Types.ObjectId;
describe('model', function() {
let db;
before(function() {
db = start();
return db.createCollection('Test').catch(() => {});
});
after(function(done) {
db.close(done);
});
beforeEach(() => db.deleteModel(/.*/));
afterEach(() => require('./util').clearTestData(db));
afterEach(() => require('./util').stopRemainingOps(db));
describe('indexes', function() {
this.timeout(5000);
it('are created when model is compiled', function() {
const Indexed = new Schema({
name: { type: String, index: true },
last: String,
email: String,
date: Date
});
Indexed.index({ last: 1, email: 1 }, { unique: true });
Indexed.index({ date: 1 }, { expires: 10 });
const IndexedModel = db.model('Test', Indexed);
let assertions = 0;
return co(function*() {
yield cb => IndexedModel.on('index', () => cb());
const indexes = yield IndexedModel.collection.getIndexes({ full: true });
indexes.forEach(function(index) {
switch (index.name) {
case '_id_':
case 'name_1':
case 'last_1_email_1':
assertions++;
break;
case 'date_1':
assertions++;
assert.equal(index.expireAfterSeconds, 10);
break;
}
});
assert.equal(assertions, 4);
});
});
it('of embedded documents', function(done) {
const BlogPosts = new Schema({
_id: { type: ObjectId, index: true },
title: { type: String, index: true },
desc: String
});
const User = new Schema({
name: { type: String, index: true },
blogposts: [BlogPosts]
});
const UserModel = db.model('Test', User);
let assertions = 0;
UserModel.on('index', function() {
UserModel.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
function iter(index) {
if (index[0] === 'name') {
assertions++;
}
if (index[0] === 'blogposts._id') {
assertions++;
}
if (index[0] === 'blogposts.title') {
assertions++;
}
}
for (const i in indexes) {
indexes[i].forEach(iter);
}
assert.equal(assertions, 3);
done();
});
});
});
it('of embedded documents unless excludeIndexes (gh-5575) (gh-8343)', function(done) {
const BlogPost = Schema({
_id: { type: ObjectId },
title: { type: String, index: true },
desc: String
});
const otherSchema = Schema({
name: { type: String, index: true }
}, { excludeIndexes: true });
const User = new Schema({
name: { type: String, index: true },
blogposts: {
type: [BlogPost],
excludeIndexes: true
},
otherblogposts: [{ type: BlogPost, excludeIndexes: true }],
blogpost: {
type: BlogPost,
excludeIndexes: true
},
otherArr: [otherSchema]
});
const UserModel = db.model('Test', User);
UserModel.on('index', function() {
UserModel.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
// Should only have _id and name indexes
const indexNames = Object.keys(indexes);
assert.deepEqual(indexNames.sort(), ['_id_', 'name_1']);
done();
});
});
});
it('of multiple embedded documents with same schema', function(done) {
const BlogPosts = new Schema({
_id: { type: ObjectId, unique: true },
title: { type: String, index: true },
desc: String
});
const User = new Schema({
name: { type: String, index: true },
blogposts: [BlogPosts],
featured: [BlogPosts]
});
const UserModel = db.model('Test', User);
let assertions = 0;
UserModel.on('index', function() {
UserModel.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
function iter(index) {
if (index[0] === 'name') {
++assertions;
}
if (index[0] === 'blogposts._id') {
++assertions;
}
if (index[0] === 'blogposts.title') {
++assertions;
}
if (index[0] === 'featured._id') {
++assertions;
}
if (index[0] === 'featured.title') {
++assertions;
}
}
for (const i in indexes) {
indexes[i].forEach(iter);
}
assert.equal(assertions, 5);
done();
});
});
});
it('compound: on embedded docs', function(done) {
const BlogPosts = new Schema({
title: String,
desc: String
});
BlogPosts.index({ title: 1, desc: 1 });
const User = new Schema({
name: { type: String, index: true },
blogposts: [BlogPosts]
});
const UserModel = db.model('Test', User);
let found = 0;
UserModel.on('index', function() {
UserModel.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
for (const index in indexes) {
switch (index) {
case 'name_1':
case 'blogposts.title_1_blogposts.desc_1':
++found;
break;
}
}
assert.equal(found, 2);
done();
});
});
});
it('nested embedded docs (gh-5199)', function(done) {
const SubSubSchema = mongoose.Schema({
nested2: String
});
SubSubSchema.index({ nested2: 1 });
const SubSchema = mongoose.Schema({
nested1: String,
subSub: SubSubSchema
});
SubSchema.index({ nested1: 1 });
const ContainerSchema = mongoose.Schema({
nested0: String,
sub: SubSchema
});
ContainerSchema.index({ nested0: 1 });
assert.deepEqual(ContainerSchema.indexes().map(function(v) { return v[0]; }), [
{ 'sub.subSub.nested2': 1 },
{ 'sub.nested1': 1 },
{ nested0: 1 }
]);
done();
});
it('primitive arrays (gh-3347)', function(done) {
const schema = new Schema({
arr: [{ type: String, unique: true }]
});
const indexes = schema.indexes();
assert.equal(indexes.length, 1);
assert.deepEqual(indexes[0][0], { arr: 1 });
assert.ok(indexes[0][1].unique);
done();
});
it('error should emit on the model', function(done) {
const schema = new Schema({ name: { type: String } });
const Test = db.model('Test', schema);
Test.create({ name: 'hi' }, { name: 'hi' }, function(err) {
assert.strictEqual(err, null);
Test.schema.index({ name: 1 }, { unique: true });
Test.schema.index({ other: 1 });
Test.on('index', function(err) {
assert.ok(/E11000 duplicate key error/.test(err.message), err);
done();
});
delete Test.$init;
Test.init().catch(() => {});
});
});
it('when one index creation errors', function(done) {
const userSchema = {
name: { type: String },
secondValue: { type: Boolean }
};
const User = new Schema(userSchema);
User.index({ name: 1 });
const User2 = new Schema(userSchema);
User2.index({ name: 1 }, { unique: true });
User2.index({ secondValue: 1 });
db.model('Test1', User, 'Test');
// Create model with second schema in same collection to add new indexes
const UserModel2 = db.model('Test2', User2, 'Test');
let assertions = 0;
UserModel2.on('index', function() {
UserModel2.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
function iter(index) {
if (index[0] === 'name') {
assertions++;
}
if (index[0] === 'secondValue') {
assertions++;
}
}
for (const i in indexes) {
indexes[i].forEach(iter);
}
assert.equal(assertions, 2);
done();
});
});
});
it('creates descending indexes from schema definition(gh-8895)', function() {
return co(function*() {
const userSchema = new Schema({
name: { type: String, index: -1 },
address: { type: String, index: '-1' }
});
const User = db.model('User', userSchema);
yield User.init();
const indexes = yield User.collection.getIndexes();
assert.ok(indexes['name_-1']);
assert.ok(indexes['address_-1']);
});
});
describe('auto creation', function() {
it('can be disabled', function(done) {
const schema = new Schema({ name: { type: String, index: true } });
schema.set('autoIndex', false);
const Test = db.model('Test', schema);
Test.on('index', function() {
assert.ok(false, 'Model.ensureIndexes() was called');
});
// Create a doc because mongodb 3.0 getIndexes errors if db doesn't
// exist
Test.create({ name: 'Bacon' }, function(err) {
assert.ifError(err);
setTimeout(function() {
Test.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
// Only default _id index should exist
assert.deepEqual(['_id_'], Object.keys(indexes));
done();
});
}, 100);
});
});
describe('global autoIndexes (gh-1875)', function() {
it('will create indexes as a default', function(done) {
const schema = new Schema({ name: { type: String, index: true } });
const Test = db.model('Test', schema);
Test.on('index', function(error) {
assert.ifError(error);
assert.ok(true, 'Model.ensureIndexes() was called');
Test.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
assert.equal(Object.keys(indexes).length, 2);
done();
});
});
});
it('will not create indexes if the global auto index is false and schema option isnt set (gh-1875)', function(done) {
const db = start({ config: { autoIndex: false } });
const schema = new Schema({ name: { type: String, index: true } });
const Test = db.model('Test', schema);
Test.on('index', function() {
assert.ok(false, 'Model.ensureIndexes() was called');
});
Test.create({ name: 'Bacon' }, function(err) {
assert.ifError(err);
setTimeout(function() {
Test.collection.getIndexes(function(err, indexes) {
assert.ifError(err);
assert.deepEqual(['_id_'], Object.keys(indexes));
db.close(done);
});
}, 100);
});
});
});
});
describe.skip('model.ensureIndexes()', function() {
it('is a function', function(done) {
const schema = mongoose.Schema({ x: 'string' });
const Test = mongoose.createConnection().model('ensureIndexes-' + random, schema);
assert.equal(typeof Test.ensureIndexes, 'function');
done();
});
it('returns a Promise', function(done) {
const schema = mongoose.Schema({ x: 'string' });
const Test = mongoose.createConnection().model('ensureIndexes-' + random, schema);
const p = Test.ensureIndexes();
assert.ok(p instanceof mongoose.Promise);
done();
});
it('creates indexes', function(done) {
const schema = new Schema({ name: { type: String } });
const Test = db.model('ManualIndexing' + random(), schema, 'x' + random());
Test.schema.index({ name: 1 }, { sparse: true });
let called = false;
Test.on('index', function() {
called = true;
});
Test.ensureIndexes(function(err) {
assert.ifError(err);
assert.ok(called);
done();
});
});
});
});
it('sets correct partialFilterExpression for document array (gh-9091)', function() {
const childSchema = new Schema({ name: String });
childSchema.index({ name: 1 }, { partialFilterExpression: { name: { $exists: true } } });
const schema = new Schema({ arr: [childSchema] });
const Model = db.model('Test', schema);
return co(function*() {
yield Model.init();
yield Model.syncIndexes();
const indexes = yield Model.listIndexes();
assert.equal(indexes.length, 2);
assert.ok(indexes[1].partialFilterExpression);
assert.deepEqual(indexes[1].partialFilterExpression, {
'arr.name': { $exists: true }
});
});
});
it('skips automatic indexing on childSchema if autoIndex: false (gh-9150)', function() {
const nestedSchema = mongoose.Schema({
name: { type: String, index: true }
}, { autoIndex: false });
const schema = mongoose.Schema({
nested: nestedSchema,
top: { type: String, index: true }
});
let Model;
return Promise.resolve().
then(() => {
Model = db.model('Model', schema);
return Model.init();
}).
then(() => Model.listIndexes()).
then(indexes => {
assert.equal(indexes.length, 2);
assert.deepEqual(indexes[1].key, { top: 1 });
});
});
describe('discriminators with unique', function() {
this.timeout(5000);
it('converts to partial unique index (gh-6347)', function() {
const baseOptions = { discriminatorKey: 'kind' };
const baseSchema = new Schema({}, baseOptions);
const Base = db.model('Test', baseSchema);
const userSchema = new Schema({
emailId: { type: String, unique: true }, // Should become a partial
firstName: { type: String }
});
const User = Base.discriminator('User', userSchema);
const deviceSchema = new Schema({
_id: { type: Schema.ObjectId, auto: true },
name: { type: String, unique: true }, // Should become a partial
other: { type: String, index: true }, // Should become a partial
model: { type: String }
});
const Device = Base.discriminator('Device', deviceSchema);
return Promise.all([
Base.init(),
User.init(),
Device.init(),
Base.create({}),
User.create({ emailId: '[email protected]', firstName: 'Val' }),
Device.create({ name: 'Samsung', model: 'Galaxy' })
]).then(() => Base.listIndexes()).
then(indexes => indexes.find(i => i.key.other)).
then(index => {
assert.deepEqual(index.key, { other: 1 });
assert.deepEqual(index.partialFilterExpression, { kind: 'Device' });
});
});
it('decorated discriminator index with syncIndexes (gh-6347)', function() {
const baseOptions = { discriminatorKey: 'kind' };
const baseSchema = new Schema({}, baseOptions);
const Base = db.model('Test', baseSchema);
const userSchema = new Schema({
emailId: { type: String, unique: true }, // Should become a partial
firstName: { type: String }
});
const User = Base.discriminator('User', userSchema);
return User.init().
then(() => User.syncIndexes()).
then(dropped => assert.equal(dropped.length, 0));
});
it('uses schema-level collation by default (gh-9912)', function() {
return co(function*() {
yield db.db.collection('User').drop().catch(() => {});
const userSchema = new mongoose.Schema({ username: String }, {
collation: {
locale: 'en',
strength: 2
}
});
userSchema.index({ username: 1 }, { unique: true });
const User = db.model('User', userSchema, 'User');
yield User.init();
const indexes = yield User.listIndexes();
assert.equal(indexes.length, 2);
assert.deepEqual(indexes[1].key, { username: 1 });
assert.ok(indexes[1].collation);
assert.equal(indexes[1].collation.strength, 2);
yield User.collection.drop();
});
});
it('different collation with syncIndexes() (gh-8521)', function() {
return co(function*() {
yield db.db.collection('User').drop().catch(() => {});
let userSchema = new mongoose.Schema({ username: String });
userSchema.index({ username: 1 }, { unique: true });
let User = db.model('User', userSchema, 'User');
yield User.init();
let indexes = yield User.listIndexes();
assert.equal(indexes.length, 2);
assert.deepEqual(indexes[1].key, { username: 1 });
assert.ok(!indexes[1].collation);
userSchema = new mongoose.Schema({ username: String }, { autoIndex: false });
userSchema.index({ username: 1 }, {
unique: true,
collation: {
locale: 'en',
strength: 2
}
});
db.deleteModel('User');
User = db.model('User', userSchema, 'User');
yield User.syncIndexes();
indexes = yield User.listIndexes();
assert.equal(indexes.length, 2);
assert.deepEqual(indexes[1].key, { username: 1 });
assert.ok(!!indexes[1].collation);
yield User.collection.drop();
});
});
it('reports syncIndexes() error (gh-9303)', function() {
return co(function*() {
let userSchema = new mongoose.Schema({ username: String, email: String });
let User = db.model('User', userSchema);
yield User.createCollection().catch(() => {});
let indexes = yield User.listIndexes();
assert.equal(indexes.length, 1);
yield User.create([{ username: 'test', email: 'foo@bar' }, { username: 'test', email: 'foo@bar' }]);
userSchema = new mongoose.Schema({ username: String, email: String }, { autoIndex: false });
userSchema.index({ username: 1 }, { unique: true });
userSchema.index({ email: 1 });
db.deleteModel('User');
User = db.model('User', userSchema, 'User');
const err = yield User.syncIndexes().then(() => null, err => err);
assert.ok(err);
assert.equal(err.code, 11000);
indexes = yield User.listIndexes();
assert.equal(indexes.length, 2);
assert.deepEqual(indexes[1].key, { email: 1 });
yield User.collection.drop();
});
});
it('cleanIndexes (gh-6676)', function() {
return co(function*() {
let M = db.model('Test', new Schema({
name: { type: String, index: true }
}, { autoIndex: false }), 'Test');
yield M.createIndexes();
let indexes = yield M.listIndexes();
assert.deepEqual(indexes.map(i => i.key), [
{ _id: 1 },
{ name: 1 }
]);
M = db.model('Test', new Schema({
name: String
}, { autoIndex: false }), 'Test');
yield M.cleanIndexes();
indexes = yield M.listIndexes();
assert.deepEqual(indexes.map(i => i.key), [
{ _id: 1 }
]);
});
});
it('should prevent collation on text indexes (gh-10044)', function() {
return co(function*() {
const userSchema = new mongoose.Schema({ username: String }, {
collation: {
locale: 'en',
strength: 2
}
});
userSchema.index({ username: 'text' }, { unique: true });
const User = db.model('User', userSchema, 'User');
yield User.init();
const indexes = yield User.listIndexes();
assert.ok(!indexes[1].collation);
yield User.collection.drop();
});
});
});
});
| 1 | 14,640 | Add a `yield User.init()` here to wait for all indexes to finish building | Automattic-mongoose | js |
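The review above asks for a `User.init()` call so index builds complete before `diffIndexes()` runs. Below is a minimal sketch of how the new test from the patch might look with that call added; it assumes the surrounding file's `db`, `co`, `mongoose`, and `assert`, places `init()` right after the model is compiled (the reviewer's exact "here" is not shown), and folds the patch's three schema objects into one definition object, since `Schema` treats extra arguments as options.

```js
it('should do a dryRun feat-10316', function() {
  return co(function*() {
    // Single definition object; the patch passes three objects, but Schema
    // treats the 2nd/3rd arguments as options, so this folds them together.
    const userSchema = new mongoose.Schema({ username: String, password: String, email: String });
    userSchema.index({ password: 1 });
    userSchema.index({ email: 1 });
    const User = db.model('Upson', userSchema);
    // Reviewer's request: wait for index builds to finish before diffing.
    // Placing it right after compiling the model is an assumption.
    yield User.init();
    yield User.collection.createIndex({ age: 1 });
    const result = yield User.diffIndexes();
    assert.deepStrictEqual(result.toDrop, ['age_1']);
    assert.deepStrictEqual(result.toCreate, [{ password: 1 }, { email: 1 }]);
  });
});
```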
@@ -11,6 +11,8 @@
#include <ifaddrs.h>
#include <arpa/inet.h>
+DEFINE_string(local_ip, "", "local ip");
+
namespace nebula {
namespace network {
| 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "base/Base.h"
#include "network/NetworkUtils.h"
#include "fs/FileUtils.h"
#include <netdb.h>
#include <ifaddrs.h>
#include <arpa/inet.h>
namespace nebula {
namespace network {
static const int32_t kMaxHostNameLen = 256;
std::string NetworkUtils::getHostname() {
char hn[kMaxHostNameLen];
if (gethostname(hn, kMaxHostNameLen) < 0) {
LOG(ERROR) << "gethostname error : " << strerror(errno);
return "";
}
return std::string(hn);
}
StatusOr<std::string> NetworkUtils::getIPv4FromDevice(const std::string &device) {
if (device == "any") {
return "0.0.0.0";
}
auto result = listDeviceAndIPv4s();
if (!result.ok()) {
return std::move(result).status();
}
auto iter = result.value().find(device);
if (iter == result.value().end()) {
return Status::Error("No IPv4 address found for `%s'", device.c_str());
}
return iter->second;
}
StatusOr<std::vector<std::string>> NetworkUtils::listIPv4s() {
auto result = listDeviceAndIPv4s();
if (!result.ok()) {
return std::move(result).status();
}
auto getval = [] (const auto &entry) {
return entry.second;
};
std::vector<std::string> ipv4s;
ipv4s.resize(result.value().size());
std::transform(result.value().begin(), result.value().end(), ipv4s.begin(), getval);
return ipv4s;
}
StatusOr<std::unordered_map<std::string, std::string>> NetworkUtils::listDeviceAndIPv4s() {
struct ifaddrs *iflist;
std::unordered_map<std::string, std::string> dev2ipv4s;
if (::getifaddrs(&iflist) != 0) {
return Status::Error("%s", ::strerror(errno));
}
for (auto *ifa = iflist; ifa != nullptr; ifa = ifa->ifa_next) {
// Skip non-IPv4 devices
if (ifa->ifa_addr->sa_family != AF_INET) {
continue;
}
auto *addr = reinterpret_cast<struct sockaddr_in*>(ifa->ifa_addr);
// inet_ntoa is thread safe but not re-entrant,
// we could use inet_ntop instead when we need support for IPv6
dev2ipv4s[ifa->ifa_name] = ::inet_ntoa(addr->sin_addr);
}
::freeifaddrs(iflist);
if (dev2ipv4s.empty()) {
return Status::Error("No IPv4 devices found");
}
return dev2ipv4s;
}
bool NetworkUtils::getDynamicPortRange(uint16_t& low, uint16_t& high) {
FILE* pipe = popen("cat /proc/sys/net/ipv4/ip_local_port_range", "r");
if (!pipe) {
LOG(ERROR) << "Failed to open /proc/sys/net/ipv4/ip_local_port_range: "
<< strerror(errno);
return false;
}
fscanf(pipe, "%hu %hu", &low, &high);
if (pclose(pipe) < 0) {
LOG(ERROR) << "Failed to close the pipe: " << strerror(errno);
return false;
}
return true;
}
std::unordered_set<uint16_t> NetworkUtils::getPortsInUse() {
static const std::regex regex("[^:]+:[^:]+:([0-9A-F]+).+");
fs::FileUtils::FileLineIterator iter("/proc/net/tcp", ®ex);
std::unordered_set<uint16_t> inUse;
while (iter.valid()) {
auto &sm = iter.matched();
inUse.emplace(std::stoul(sm[1].str(), NULL, 16));
}
return std::move(inUse);
}
uint16_t NetworkUtils::getAvailablePort() {
uint16_t low = 0;
uint16_t high = 0;
CHECK(getDynamicPortRange(low, high))
<< "Failed to get the dynamic port range";
VLOG(1) << "Dynamic port range is [" << low << ", " << high << "]";
std::unordered_set<uint16_t> portsInUse = getPortsInUse();
uint16_t port = 0;
do {
port = folly::Random::rand32(low, static_cast<int32_t>(high) + 1);
} while (portsInUse.find(port) != portsInUse.end());
return port;
}
bool NetworkUtils::ipv4ToInt(const std::string& ipStr, uint32_t& ip) {
std::vector<folly::StringPiece> parts;
folly::split(".", ipStr, parts, true);
if (parts.size() != 4) {
return false;
}
ip = 0;
for (auto& s : parts) {
ip <<= 8;
try {
ip |= folly::to<uint8_t>(s);
} catch (const std::exception& ex) {
LOG(ERROR) << "Invalid ip string: \"" << ipStr << "\"";
return false;
}
}
return true;
}
std::string NetworkUtils::intToIPv4(uint32_t ip) {
static const std::vector<std::string> kDict{
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12",
"13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36",
"37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48",
"49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60",
"61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72",
"73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84",
"85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96",
"97", "98", "99", "100", "101", "102", "103", "104", "105", "106",
"107", "108", "109", "110", "111", "112", "113", "114", "115", "116",
"117", "118", "119", "120", "121", "122", "123", "124", "125", "126",
"127", "128", "129", "130", "131", "132", "133", "134", "135", "136",
"137", "138", "139", "140", "141", "142", "143", "144", "145", "146",
"147", "148", "149", "150", "151", "152", "153", "154", "155", "156",
"157", "158", "159", "160", "161", "162", "163", "164", "165", "166",
"167", "168", "169", "170", "171", "172", "173", "174", "175", "176",
"177", "178", "179", "180", "181", "182", "183", "184", "185", "186",
"187", "188", "189", "190", "191", "192", "193", "194", "195", "196",
"197", "198", "199", "200", "201", "202", "203", "204", "205", "206",
"207", "208", "209", "210", "211", "212", "213", "214", "215", "216",
"217", "218", "219", "220", "221", "222", "223", "224", "225", "226",
"227", "228", "229", "230", "231", "232", "233", "234", "235", "236",
"237", "238", "239", "240", "241", "242", "243", "244", "245", "246",
"247", "248", "249", "250", "251", "252", "253", "254", "255"
};
auto& f1 = kDict[ip & 0x000000FF];
auto& f2 = kDict[(ip >> 8) & 0x000000FF];
auto& f3 = kDict[(ip >> 16) & 0x000000FF];
auto& f4 = kDict[(ip >> 24) & 0x000000FF];
char buf[16];
char* pt = buf;
strcpy(pt, f4.c_str());
pt += f4.size();
*pt++ = '.';
strcpy(pt, f3.c_str());
pt += f3.size();
*pt++ = '.';
strcpy(pt, f2.c_str());
pt += f2.size();
*pt++ = '.';
strcpy(pt, f1.c_str());
pt += f1.size();
return buf;
}
} // namespace network
} // namespace nebula
| 1 | 14,359 | I don't think this belongs here. If the application can take a local_ip as input, the application should take care of it, not in this common library | vesoft-inc-nebula | cpp |
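The reviewer's point is that a `local_ip` flag is application configuration, not something the shared network library should define. A hedged sketch of that alternative, with illustrative names only (none of this comes from the PR): the flag lives in the application binary and the parsed value is passed to whatever needs it.

```cpp
// Illustrative only: the flag moves out of the common library and into the
// application binary. None of these names come from the PR itself.
#include <string>
#include <gflags/gflags.h>

DEFINE_string(local_ip, "", "local ip");  // defined by the application, not NetworkUtils

int main(int argc, char* argv[]) {
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    // The application hands the value to whatever component needs it,
    // instead of the shared network library reading a global flag.
    const std::string localIp = FLAGS_local_ip;
    (void)localIp;  // e.g. pass into a service/daemon constructor here
    return 0;
}
```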
@@ -524,6 +524,17 @@ class Theme
return ThemeData::forTheme($this);
}
+ /**
+ * Remove data specific to this theme
+ * @return bool
+ */
+ public function removeCustomData()
+ {
+ if ($this->hasCustomData()) {
+ return $this->getCustomData()->delete();
+ }
+ }
+
/**
* Checks to see if the database layer has been enabled
* | 1 | <?php namespace Cms\Classes;
use App;
use Url;
use File;
use Yaml;
use Lang;
use Cache;
use Event;
use Config;
use Exception;
use SystemException;
use DirectoryIterator;
use ApplicationException;
use Cms\Models\ThemeData;
use System\Models\Parameter;
use October\Rain\Halcyon\Datasource\DbDatasource;
use October\Rain\Halcyon\Datasource\FileDatasource;
use October\Rain\Halcyon\Datasource\DatasourceInterface;
/**
* This class represents the CMS theme.
* CMS theme is a directory that contains all CMS objects - pages, layouts, partials and asset files..
* The theme parameters are specified in the theme.ini file in the theme root directory.
*
* @package october\cms
* @author Alexey Bobkov, Samuel Georges
*/
class Theme
{
/**
* @var string Specifies the theme directory name.
*/
protected $dirName;
/**
* @var mixed Keeps the cached configuration file values.
*/
protected $configCache;
/**
* @var mixed Active theme cache in memory
*/
protected static $activeThemeCache = false;
/**
* @var mixed Edit theme cache in memory
*/
protected static $editThemeCache = false;
const ACTIVE_KEY = 'cms::theme.active';
const EDIT_KEY = 'cms::theme.edit';
const CONFIG_KEY = 'cms::theme.config';
/**
* Loads the theme.
* @return self
*/
public static function load($dirName)
{
$theme = new static;
$theme->setDirName($dirName);
$theme->registerHalyconDatasource();
return $theme;
}
/**
* Returns the absolute theme path.
* @param string $dirName Optional theme directory. Defaults to $this->getDirName()
* @return string
*/
public function getPath($dirName = null)
{
if (!$dirName) {
$dirName = $this->getDirName();
}
return themes_path().'/'.$dirName;
}
/**
* Sets the theme directory name.
* @return void
*/
public function setDirName($dirName)
{
$this->dirName = $dirName;
}
/**
* Returns the theme directory name.
* @return string
*/
public function getDirName()
{
return $this->dirName;
}
/**
* Helper for {{ theme.id }} twig vars
* Returns a unique string for this theme.
* @return string
*/
public function getId()
{
return snake_case(str_replace('/', '-', $this->getDirName()));
}
/**
* Determines if a theme with given directory name exists
* @param string $dirName The theme directory
* @return bool
*/
public static function exists($dirName)
{
$theme = static::load($dirName);
$path = $theme->getPath();
return File::isDirectory($path);
}
/**
* Returns a list of pages in the theme.
* This method is used internally in the routing process and in the back-end UI.
* @param boolean $skipCache Indicates if the pages should be reloaded from the disk bypassing the cache.
* @return array Returns an array of \Cms\Classes\Page objects.
*/
public function listPages($skipCache = false)
{
return Page::listInTheme($this, $skipCache);
}
/**
* Returns true if this theme is the chosen active theme.
*/
public function isActiveTheme()
{
$activeTheme = self::getActiveTheme();
return $activeTheme && $activeTheme->getDirName() == $this->getDirName();
}
/**
* Returns the active theme code.
* By default the active theme is loaded from the cms.activeTheme parameter,
* but this behavior can be overridden by the cms.theme.getActiveTheme event listener.
* @return string
* If the theme doesn't exist, returns null.
*/
public static function getActiveThemeCode()
{
$activeTheme = Config::get('cms.activeTheme');
if (App::hasDatabase()) {
try {
try {
$dbResult = Cache::remember(self::ACTIVE_KEY, 1440, function () {
return Parameter::applyKey(self::ACTIVE_KEY)->value('value');
});
}
catch (Exception $ex) {
// Cache failed
$dbResult = Parameter::applyKey(self::ACTIVE_KEY)->value('value');
}
}
catch (Exception $ex) {
// Database failed
$dbResult = null;
}
if ($dbResult !== null && static::exists($dbResult)) {
$activeTheme = $dbResult;
}
}
/**
* @event cms.theme.getActiveTheme
* Overrides the active theme code.
*
* If a value is returned from this halting event, it will be used as the active
* theme code. Example usage:
*
* Event::listen('cms.theme.getActiveTheme', function() { return 'mytheme'; });
*
*/
$apiResult = Event::fire('cms.theme.getActiveTheme', [], true);
if ($apiResult !== null) {
$activeTheme = $apiResult;
}
if (!strlen($activeTheme)) {
throw new SystemException(Lang::get('cms::lang.theme.active.not_set'));
}
return $activeTheme;
}
/**
* Returns the active theme object.
* @return \Cms\Classes\Theme Returns the loaded theme object.
* If the theme doesn't exist, returns null.
*/
public static function getActiveTheme()
{
if (self::$activeThemeCache !== false) {
return self::$activeThemeCache;
}
$theme = static::load(static::getActiveThemeCode());
if (!File::isDirectory($theme->getPath())) {
return self::$activeThemeCache = null;
}
return self::$activeThemeCache = $theme;
}
/**
* Sets the active theme.
* The active theme code is stored in the database and overrides the configuration cms.activeTheme parameter.
* @param string $code Specifies the active theme code.
*/
public static function setActiveTheme($code)
{
self::resetCache();
Parameter::set(self::ACTIVE_KEY, $code);
/**
* @event cms.theme.setActiveTheme
* Fires when the active theme has been changed.
*
* If a value is returned from this halting event, it will be used as the active
* theme code. Example usage:
*
* Event::listen('cms.theme.setActiveTheme', function($code) {
* \Log::info("Theme has been changed to $code");
* });
*
*/
Event::fire('cms.theme.setActiveTheme', compact('code'));
}
/**
* Returns the edit theme code.
* By default the edit theme is loaded from the cms.editTheme parameter,
* but this behavior can be overridden by the cms.theme.getEditTheme event listeners.
* If the edit theme is not defined in the configuration file, the active theme
* is returned.
* @return string
*/
public static function getEditThemeCode()
{
$editTheme = Config::get('cms.editTheme');
if (!$editTheme) {
$editTheme = static::getActiveThemeCode();
}
/**
* @event cms.theme.getEditTheme
* Overrides the edit theme code.
*
* If a value is returned from this halting event, it will be used as the edit
* theme code. Example usage:
*
* Event::listen('cms.theme.getEditTheme', function() {
* return "the-edit-theme-code";
* });
*
*/
$apiResult = Event::fire('cms.theme.getEditTheme', [], true);
if ($apiResult !== null) {
$editTheme = $apiResult;
}
if (!strlen($editTheme)) {
throw new SystemException(Lang::get('cms::lang.theme.edit.not_set'));
}
return $editTheme;
}
/**
* Returns the edit theme.
* @return \Cms\Classes\Theme Returns the loaded theme object.
*/
public static function getEditTheme()
{
if (self::$editThemeCache !== false) {
return self::$editThemeCache;
}
$theme = static::load(static::getEditThemeCode());
if (!File::isDirectory($theme->getPath())) {
return self::$editThemeCache = null;
}
return self::$editThemeCache = $theme;
}
/**
* Returns a list of all themes.
* @return array Returns an array of the Theme objects.
*/
public static function all()
{
$it = new DirectoryIterator(themes_path());
$it->rewind();
$result = [];
foreach ($it as $fileinfo) {
if (!$fileinfo->isDir() || $fileinfo->isDot()) {
continue;
}
$theme = static::load($fileinfo->getFilename());
$result[] = $theme;
}
return $result;
}
/**
* Reads the theme.yaml file and returns the theme configuration values.
* @return array Returns the parsed configuration file values.
*/
public function getConfig()
{
if ($this->configCache !== null) {
return $this->configCache;
}
$path = $this->getPath().'/theme.yaml';
if (!File::exists($path)) {
return $this->configCache = [];
}
try {
if (Config::get('app.debug', false)) {
$config = Yaml::parseFile($path);
} else {
$cacheKey = self::CONFIG_KEY.'::'.$this->getDirName();
$config = Cache::rememberForever($cacheKey, function () use ($path) {
return Yaml::parseFile($path);
});
}
}
catch (Exception $ex) {
// Cache failed
$config = Yaml::parseFile($path);
}
/**
* @event cms.theme.extendConfig
* Extend basic theme configuration supplied by the theme by returning an array.
*
* Note if planning on extending form fields, use the `cms.theme.extendFormConfig`
* event instead.
*
* Example usage:
*
* Event::listen('cms.theme.extendConfig', function ($themeCode, &$config) {
* $config['name'] = 'October Theme';
* $config['description'] = 'Another great theme from October CMS';
* });
*
*/
Event::fire('cms.theme.extendConfig', [$this->getDirName(), &$config]);
return $this->configCache = $config;
}
/**
* Themes have a dedicated `form` option that provide form fields
* for customization, this is an immutable accessor for that and
* also an solid anchor point for extension.
* @return array
*/
public function getFormConfig()
{
$config = $this->getConfigArray('form');
/**
* @event cms.theme.extendFormConfig
* Extend form field configuration supplied by the theme by returning an array.
*
* Note if you are planning on using `assetVar` to inject CSS variables from a
* plugin registration file, make sure the plugin has elevated permissions.
*
* Example usage:
*
* Event::listen('cms.theme.extendFormConfig', function ($themeCode, &$config) {
* array_set($config, 'tabs.fields.header_color', [
* 'label' => 'Header Colour',
* 'type' => 'colorpicker',
* 'availableColors' => [#34495e, #708598, #3498db],
* 'assetVar' => 'header-bg',
* 'tab' => 'Global'
* ]);
* });
*
*/
Event::fire('cms.theme.extendFormConfig', [$this->getDirName(), &$config]);
return $config;
}
/**
* Returns a value from the theme configuration file by its name.
* @param string $name Specifies the configuration parameter name.
* @param mixed $default Specifies the default value to return in case if the parameter
* doesn't exist in the configuration file.
* @return mixed Returns the parameter value or a default value
*/
public function getConfigValue($name, $default = null)
{
return array_get($this->getConfig(), $name, $default);
}
/**
* Returns an array value from the theme configuration file by its name.
* If the value is a string, it is treated as a YAML file and loaded.
* @param string $name Specifies the configuration parameter name.
* @return array
*/
public function getConfigArray($name)
{
$result = array_get($this->getConfig(), $name, []);
if (is_string($result)) {
$fileName = File::symbolizePath($result);
if (File::isLocalPath($fileName)) {
$path = $fileName;
}
else {
$path = $this->getPath().'/'.$result;
}
if (!File::exists($path)) {
throw new ApplicationException('Path does not exist: '.$path);
}
$result = Yaml::parseFile($path);
}
return (array) $result;
}
/**
* Writes to the theme.yaml file with the supplied array values.
* @param array $values Data to write
* @param array $overwrite If true, undefined values are removed.
* @return void
*/
public function writeConfig($values = [], $overwrite = false)
{
if (!$overwrite) {
$values = $values + (array) $this->getConfig();
}
$path = $this->getPath().'/theme.yaml';
if (!File::exists($path)) {
throw new ApplicationException('Path does not exist: '.$path);
}
$contents = Yaml::render($values);
File::put($path, $contents);
$this->configCache = $values;
self::resetCache();
}
/**
* Returns the theme preview image URL.
* If the image file doesn't exist returns the placeholder image URL.
* @return string Returns the image URL.
*/
public function getPreviewImageUrl()
{
$previewPath = $this->getConfigValue('previewImage', 'assets/images/theme-preview.png');
if (File::exists($this->getPath().'/'.$previewPath)) {
return Url::asset('themes/'.$this->getDirName().'/'.$previewPath);
}
return Url::asset('modules/cms/assets/images/default-theme-preview.png');
}
/**
* Resets any memory or cache involved with the active or edit theme.
* @return void
*/
public static function resetCache()
{
self::$activeThemeCache = false;
self::$editThemeCache = false;
Cache::forget(self::ACTIVE_KEY);
Cache::forget(self::EDIT_KEY);
Cache::forget(self::CONFIG_KEY.'::'.(new self)->getDirName());
}
/**
* Returns true if this theme has form fields that supply customization data.
* @return bool
*/
public function hasCustomData()
{
return $this->getConfigValue('form', false);
}
/**
* Returns data specific to this theme
* @return Cms\Models\ThemeData
*/
public function getCustomData()
{
return ThemeData::forTheme($this);
}
/**
* Checks to see if the database layer has been enabled
*
* @return boolean
*/
public static function databaseLayerEnabled()
{
$enableDbLayer = Config::get('cms.databaseTemplates', false);
if (is_null($enableDbLayer)) {
$enableDbLayer = !Config::get('app.debug', false);
}
return $enableDbLayer && App::hasDatabase();
}
/**
* Ensures this theme is registered as a Halcyon datasource.
* @return void
*/
public function registerHalyconDatasource()
{
$resolver = App::make('halcyon');
if (!$resolver->hasDatasource($this->dirName)) {
if (static::databaseLayerEnabled()) {
$datasource = new AutoDatasource([
'database' => new DbDatasource($this->dirName, 'cms_theme_templates'),
'filesystem' => new FileDatasource($this->getPath(), App::make('files')),
]);
} else {
$datasource = new FileDatasource($this->getPath(), App::make('files'));
}
$resolver->addDatasource($this->dirName, $datasource);
}
}
/**
* Get the theme's datasource
*
* @return DatasourceInterface
*/
public function getDatasource()
{
$resolver = App::make('halcyon');
return $resolver->datasource($this->getDirName());
}
/**
* Implements the getter functionality.
* @param string $name
* @return void
*/
public function __get($name)
{
if ($this->hasCustomData()) {
return $this->getCustomData()->{$name};
}
return null;
}
/**
* Determine if an attribute exists on the object.
* @param string $key
* @return void
*/
public function __isset($key)
{
if ($this->hasCustomData()) {
$theme = $this->getCustomData();
return $theme->offsetExists($key);
}
return false;
}
}
| 1 | 16,444 | @DanHarrin please add a `return true` at the bottom of this method, as the docblock indicates a boolean return value. | octobercms-october | php |
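For reference, a sketch of how `removeCustomData()` could satisfy its `@return bool` docblock after the requested change; whether `delete()` itself returns a boolean is left to the `ThemeData` model.

```php
/**
 * Remove data specific to this theme
 * @return bool
 */
public function removeCustomData()
{
    if ($this->hasCustomData()) {
        return $this->getCustomData()->delete();
    }

    return true;
}
```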
@@ -25,7 +25,7 @@ import org.apache.lucene.index.SlowImpactsEnum;
/** Expert: A <code>Scorer</code> for documents matching a <code>Term</code>.
*/
-final class TermScorer extends Scorer {
+public final class TermScorer extends Scorer {
private final PostingsEnum postingsEnum;
private final ImpactsEnum impactsEnum;
private final DocIdSetIterator iterator; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
import org.apache.lucene.index.ImpactsEnum;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SlowImpactsEnum;
/** Expert: A <code>Scorer</code> for documents matching a <code>Term</code>.
*/
final class TermScorer extends Scorer {
private final PostingsEnum postingsEnum;
private final ImpactsEnum impactsEnum;
private final DocIdSetIterator iterator;
private final LeafSimScorer docScorer;
private final ImpactsDISI impactsDisi;
/**
* Construct a {@link TermScorer} that will iterate all documents.
*/
TermScorer(Weight weight, PostingsEnum postingsEnum, LeafSimScorer docScorer) {
super(weight);
iterator = this.postingsEnum = postingsEnum;
impactsEnum = new SlowImpactsEnum(postingsEnum);
impactsDisi = new ImpactsDISI(impactsEnum, impactsEnum, docScorer.getSimScorer());
this.docScorer = docScorer;
}
/**
* Construct a {@link TermScorer} that will use impacts to skip blocks of
* non-competitive documents.
*/
TermScorer(Weight weight, ImpactsEnum impactsEnum, LeafSimScorer docScorer) {
super(weight);
postingsEnum = this.impactsEnum = impactsEnum;
impactsDisi = new ImpactsDISI(impactsEnum, impactsEnum, docScorer.getSimScorer());
iterator = impactsDisi;
this.docScorer = docScorer;
}
@Override
public int docID() {
return postingsEnum.docID();
}
final int freq() throws IOException {
return postingsEnum.freq();
}
@Override
public DocIdSetIterator iterator() {
return iterator;
}
@Override
public float score() throws IOException {
assert docID() != DocIdSetIterator.NO_MORE_DOCS;
return docScorer.score(postingsEnum.docID(), postingsEnum.freq());
}
@Override
public int advanceShallow(int target) throws IOException {
return impactsDisi.advanceShallow(target);
}
@Override
public float getMaxScore(int upTo) throws IOException {
return impactsDisi.getMaxScore(upTo);
}
@Override
public void setMinCompetitiveScore(float minScore) {
impactsDisi.setMinCompetitiveScore(minScore);
}
/** Returns a string representation of this <code>TermScorer</code>. */
@Override
public String toString() { return "scorer(" + weight + ")[" + super.toString() + "]"; }
}
| 1 | 38,046 | This is used in o.a.l.sandbox.search.BM25FQuery. | apache-lucene-solr | java |
@@ -41,7 +41,8 @@ public class XYShapeBoundingBoxQuery extends ShapeQuery {
@Override
protected PointValues.Relation relateRangeBBoxToQuery(int minXOffset, int minYOffset, byte[] minTriangle,
int maxXOffset, int maxYOffset, byte[] maxTriangle) {
- return rectangle2D.relateRangeBBox(minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle);
+ return rectangle2D.relateRangeBBox(minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle,
+ queryRelation == QueryRelation.INTERSECTS);
}
/** returns true if the query matches the encoded triangle */ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.document;
import org.apache.lucene.document.ShapeField.QueryRelation;
import org.apache.lucene.geo.XYRectangle;
import org.apache.lucene.geo.XYRectangle2D;
import org.apache.lucene.index.PointValues;
/**
* Finds all previously indexed cartesian shapes that intersect the specified bounding box.
*
* <p>The field must be indexed using
* {@link org.apache.lucene.document.XYShape#createIndexableFields} added per document.
*
* @lucene.experimental
**/
public class XYShapeBoundingBoxQuery extends ShapeQuery {
final XYRectangle2D rectangle2D;
public XYShapeBoundingBoxQuery(String field, QueryRelation queryRelation, double minX, double maxX, double minY, double maxY) {
super(field, queryRelation);
XYRectangle rectangle = new XYRectangle(minX, maxX, minY, maxY);
this.rectangle2D = XYRectangle2D.create(rectangle);
}
@Override
protected PointValues.Relation relateRangeBBoxToQuery(int minXOffset, int minYOffset, byte[] minTriangle,
int maxXOffset, int maxYOffset, byte[] maxTriangle) {
return rectangle2D.relateRangeBBox(minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle);
}
/** returns true if the query matches the encoded triangle */
@Override
protected boolean queryMatches(byte[] t, int[] scratchTriangle, QueryRelation queryRelation) {
// decode indexed triangle
ShapeField.decodeTriangle(t, scratchTriangle);
int aY = scratchTriangle[0];
int aX = scratchTriangle[1];
int bY = scratchTriangle[2];
int bX = scratchTriangle[3];
int cY = scratchTriangle[4];
int cX = scratchTriangle[5];
if (queryRelation == QueryRelation.WITHIN) {
return rectangle2D.containsTriangle(aX, aY, bX, bY, cX, cY);
}
return rectangle2D.intersectsTriangle(aX, aY, bX, bY, cX, cY);
}
@Override
public boolean equals(Object o) {
return sameClassAs(o) && equalsTo(getClass().cast(o));
}
@Override
protected boolean equalsTo(Object o) {
return super.equalsTo(o) && rectangle2D.equals(((XYShapeBoundingBoxQuery)o).rectangle2D);
}
@Override
public int hashCode() {
int hash = super.hashCode();
hash = 31 * hash + rectangle2D.hashCode();
return hash;
}
@Override
public String toString(String field) {
final StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName());
sb.append(':');
if (this.field.equals(field) == false) {
sb.append(" field=");
sb.append(this.field);
sb.append(':');
}
sb.append(rectangle2D.toString());
return sb.toString();
}
}
| 1 | 30,189 | Shouldn't this work as well for Disjoint? | apache-lucene-solr | java |
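One possible reading of the question, sketched under the assumption that `QueryRelation` also defines a `DISJOINT` value (that value is not shown in this file): the boolean handed to `relateRangeBBox` would cover both relations. This is only an illustration of the reviewer's suggestion, not the actual fix.

```java
// Sketch of the reviewer's question, assuming QueryRelation also defines DISJOINT.
@Override
protected PointValues.Relation relateRangeBBoxToQuery(int minXOffset, int minYOffset, byte[] minTriangle,
                                                      int maxXOffset, int maxYOffset, byte[] maxTriangle) {
  return rectangle2D.relateRangeBBox(minXOffset, minYOffset, minTriangle, maxXOffset, maxYOffset, maxTriangle,
      queryRelation == QueryRelation.INTERSECTS || queryRelation == QueryRelation.DISJOINT);
}
```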
@@ -50,6 +50,9 @@ class TestCase(unittest.TestCase):
for nm, fn in Descriptors._descList:
try:
v = fn(m)
+ except RuntimeError:
+ # 3D descriptors fail since the mol has no conformers
+ pass
except Exception:
import traceback
traceback.print_exc() | 1 | #
# Copyright (C) 2007-2017 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" General descriptor testing code
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest, os.path
import io
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem import Lipinski
import numpy as np
def feq(n1, n2, tol=1e-4):
return abs(n1 - n2) <= tol
class TestCase(unittest.TestCase):
def testGithub1287(self):
smis = ('CCC', )
for smi in smis:
m = Chem.MolFromSmiles(smi)
self.assertTrue(m)
for nm, fn in Descriptors._descList:
try:
v = fn(m)
except Exception:
import traceback
traceback.print_exc()
raise AssertionError('SMILES: %s; Descriptor: %s' % (smi, nm))
def testBadAtomHandling(self):
smis = ('CC[Pu]', 'CC[*]')
for smi in smis:
m = Chem.MolFromSmiles(smi)
self.assertTrue(m)
for nm, fn in Descriptors._descList:
try:
v = fn(m)
except Exception:
import traceback
traceback.print_exc()
raise AssertionError('SMILES: %s; Descriptor: %s' % (smi, nm))
def testMolFormula(self):
for (smiles, expected) in (("[NH4+]", "H4N+"),
("c1ccccc1", "C6H6"),
("C1CCCCC1", "C6H12"),
("c1ccccc1O", "C6H6O"),
("C1CCCCC1O", "C6H12O"),
("C1CCCCC1=O", "C6H10O"),
("N[Na]", "H2NNa"),
("[C-][C-]", "C2-2"),
("[H]", "H"),
("[H-1]", "H-"),
("[H-1]", "H-"),
("[CH2]", "CH2"),
("[He-2]", "He-2"),
("[U+3]", "U+3"), ):
mol = Chem.MolFromSmiles(smiles)
actual = AllChem.CalcMolFormula(mol)
self.assertEqual(actual, expected)
def testMQNDetails(self):
refFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'MQNs_regress.pkl')
refFile2 = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'MQNs_non_strict_regress.pkl')
# figure out which definition we are currently using
m = Chem.MolFromSmiles("CC(C)(C)c1cc(O)c(cc1O)C(C)(C)C")
if Lipinski.NumRotatableBonds(m) == 2:
refFile = refFile2
with open(refFile, 'r') as intf:
buf = intf.read().replace('\r\n', '\n').encode('utf-8')
intf.close()
with io.BytesIO(buf) as inf:
pkl = inf.read()
refData = cPickle.loads(pkl, encoding='bytes')
fn = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'aromat_regress.txt')
ms = [x for x in Chem.SmilesMolSupplier(fn, delimiter='\t')]
refData2 = []
for i, m in enumerate(ms):
mqns = rdMolDescriptors.MQNs_(m)
refData2.append((m, mqns))
if mqns != refData[i][1]:
indices = [(j, x, y) for j, x, y in zip(range(len(mqns)), mqns, refData[i][1]) if x != y]
print(i, Chem.MolToSmiles(m), indices)
self.assertEqual(mqns, refData[i][1])
def testMQN(self):
m = Chem.MolFromSmiles("CC(C)(C)c1cc(O)c(cc1O)C(C)(C)C")
if Lipinski.NumRotatableBonds(m) == 2:
tgt = np.array(
[42917, 274, 870, 621, 135, 1582, 29, 3147, 5463, 6999, 470, 62588, 19055, 4424, 309, 24061,
17820, 1, 9303, 24146, 16076, 5560, 4262, 646, 746, 13725, 5430, 2629, 362, 24211, 15939,
292, 41, 20, 1852, 5642, 31, 9, 1, 2, 3060, 1750])
else:
tgt = np.array(
[42917, 274, 870, 621, 135, 1582, 29, 3147, 5463, 6999, 470, 62588, 19055, 4424, 309, 24061,
17820, 1, 8314, 24146, 16076, 5560, 4262, 646, 746, 13725, 5430, 2629, 362, 24211, 15939,
292, 41, 20, 1852, 5642, 31, 9, 1, 2, 3060, 1750])
fn = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'aromat_regress.txt')
ms = [x for x in Chem.SmilesMolSupplier(fn, delimiter='\t')]
vs = np.zeros((42, ), np.int32)
for m in ms:
vs += rdMolDescriptors.MQNs_(m)
self.assertFalse(False in (vs == tgt))
# - - - - -
if __name__ == '__main__':
unittest.main()
| 1 | 16,633 | Same here. This was included to quiet the test for the 3D descriptors. Since we removed them, this exception handling is no longer required. | rdkit-rdkit | cpp |
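In other words, the `except RuntimeError: pass` clause added by the patch can be dropped again once the 3D descriptors are removed from the descriptor list; a sketch of the reverted loop (identical to the pre-patch code shown in the file above):

```python
# With the 3D descriptors out of Descriptors._descList, the loop can go
# back to the pre-patch shape:
for nm, fn in Descriptors._descList:
    try:
        v = fn(m)
    except Exception:
        import traceback
        traceback.print_exc()
        raise AssertionError('SMILES: %s; Descriptor: %s' % (smi, nm))
```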
@@ -26,6 +26,7 @@
#include "lbann/utils/lbann_library.hpp"
#include "lbann/callbacks/callback_checkpoint.hpp"
+#include "lbann/data_store/data_store_jag.hpp"
namespace lbann {
| 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/utils/lbann_library.hpp"
#include "lbann/callbacks/callback_checkpoint.hpp"
namespace lbann {
/// Setup I/O thread pool that is shared across all models
std::unique_ptr<thread_pool> construct_io_thread_pool(lbann_comm *comm) {
int num_io_threads = num_free_cores_per_process(comm);
options *opts = options::get();
if(opts->has_int("num_io_threads")) {
int requested_io_threads = opts->get_int("num_io_threads");
if(requested_io_threads > 0 && requested_io_threads < num_io_threads) {
num_io_threads = requested_io_threads;
}
}
auto io_threads_offset = free_core_offset(comm);
if(comm->am_world_master()) {
std::cout << "\tNum. I/O Threads: " << num_io_threads <<
" (Limited to # Unused Compute Cores or 1)" << std::endl;
}
auto io_thread_pool = make_unique<thread_pool>();
io_thread_pool->launch_pinned_threads(num_io_threads, io_threads_offset);
return io_thread_pool;
}
std::unique_ptr<model> build_model_from_prototext(
int argc, char **argv,
lbann_data::LbannPB &pb,
lbann_comm *comm,
std::shared_ptr<thread_pool> io_thread_pool,
bool first_model) {
int random_seed = lbann_default_random_seed;
bool master = comm->am_world_master();
if (master) {
std::cerr << "starting build_model_from_prototext" << std::endl;
}
std::ostringstream err;
options *opts = options::get();
// Optionally over-ride some values in prototext
get_cmdline_overrides(*comm, pb);
customize_data_readers_index_list(*comm, pb);
lbann_data::Model *pb_model = pb.mutable_model();
// Adjust the number of parallel readers; this may be adjusted
// after calling split_trainers()
set_num_parallel_readers(*comm, pb);
// Check to see if the model wants to reduce the I/O parallelism
if(pb_model->serialize_io() && io_thread_pool->get_num_threads() != 1) {
if(master) {
std::cout << "Model " << pb_model->name() << " serialized the I/O threads" << std::endl;
}
io_thread_pool->relaunch_pinned_threads(1);
}
// Setup I/O threads
auto io_threads_per_process = io_thread_pool->get_num_threads();
auto io_threads_offset = io_thread_pool->get_threads_offset();
// Set algorithmic blocksize
if (pb_model->block_size() == 0 and master) {
err << "model does not provide a valid block size (" << pb_model->block_size() << ")";
LBANN_ERROR(err.str());
}
El::SetBlocksize(pb_model->block_size());
// Change random seed if needed.
if (pb_model->random_seed() > 0) {
random_seed = pb_model->random_seed();
// Reseed here so that setup is done with this new seed.
init_random(random_seed);
init_data_seq_random(random_seed);
}
// Initialize models differently if needed.
#ifndef LBANN_DETERMINISTIC
if (pb_model->random_init_models_differently()) {
random_seed = random_seed + comm->get_trainer_rank();
// Reseed here so that setup is done with this new seed.
init_random(random_seed);
init_data_seq_random(random_seed);
}
#else
if (pb_model->random_init_models_differently()) {
if (master) {
std::cout << "WARNING: Ignoring random_init_models_differently " <<
"due to sequential consistency" << std::endl;
}
}
#endif
// Set up the communicator and get the grid based on the first model's spec.
// We do not currently support splitting different models in different ways,
// as this implies different grids.
int procs_per_trainer = pb_model->procs_per_trainer();
if (procs_per_trainer == 0) {
procs_per_trainer = comm->get_procs_in_world();
}
if (first_model) {
comm->split_trainers(procs_per_trainer);
if (pb_model->num_parallel_readers() > procs_per_trainer) {
pb_model->set_num_parallel_readers(procs_per_trainer);
}
} else if (procs_per_trainer != comm->get_procs_per_trainer()) {
LBANN_ERROR("Model prototexts requesting different procs per model is not supported");
}
// Save info to file; this includes the complete prototext (with any over-rides
// from the cmd line) and various other info
save_session(*comm, argc, argv, pb);
// Report useful information
if (master) {
print_lbann_configuration(pb_model, comm, io_threads_per_process, io_threads_offset);
}
// Display how the OpenMP threads are provisioned
if (opts->has_string("print_affinity")) {
display_omp_setup();
}
// Initialize data readers
//@todo: code not in place for correctly handling image preprocessing
std::map<execution_mode, generic_data_reader *> data_readers;
bool is_shared_training_data_reader = pb_model->shareable_training_data_reader();
bool is_shared_testing_data_reader = pb_model->shareable_testing_data_reader();
if (opts->has_string("share_testing_data_readers")) {
is_shared_testing_data_reader = opts->get_bool("share_testing_data_readers");
}
init_data_readers(comm, pb, data_readers, is_shared_training_data_reader, is_shared_testing_data_reader);
// hack to prevent all data readers from loading identical data; instead,
// share a single copy. See data_reader_jag_conduit_hdf5 for example
if (first_model) {
if (opts->has_string("share_data_reader_data")) {
for (auto&& t : data_readers) {
opts->set_ptr((void*)t.second);
}
}
}
// User feedback
print_parameters(*comm, pb);
// Initalize model
std::unique_ptr<model> ret_model{
proto::construct_model(comm,
data_readers,
pb.optimizer(),
pb.model())
};
ret_model->setup(std::move(io_thread_pool));
if(opts->get_bool("disable_background_io_activity")) {
ret_model->allow_background_io_activity(false);
}
if (opts->get_bool("use_data_store")) {
if (master) {
std::cout << "\nUSING DATA STORE!\n\n";
}
for (auto&& r : data_readers) {
if (!r.second) continue;
r.second->setup_data_store(pb_model->mini_batch_size());
}
}
// restart model from checkpoint if we have one
//@todo
//model->restartShared();
if (comm->am_world_master()) {
std::cout << "\n"
<< ret_model->get_description()
<< "Callbacks:" << std::endl;
for (lbann_callback *cb : ret_model->get_callbacks()) {
std::cout << cb->name() << std::endl;
}
}
if (first_model) {
#ifndef LBANN_DETERMINISTIC
// Under normal conditions, reinitialize the random number generator so
// that regularization techniques (e.g. dropout) generate unique patterns
// on different ranks.
init_random(random_seed + comm->get_rank_in_world());
#else
if(comm->am_world_master()) {
std::cout <<
"--------------------------------------------------------------------------------\n"
"ALERT: executing in sequentially consistent mode -- performance will suffer\n"
"--------------------------------------------------------------------------------\n";
}
#endif
}
return ret_model;
}
void print_lbann_configuration(lbann_data::Model *pb_model, lbann_comm *comm, int io_threads_per_process, int io_threads_offset) {
// Report hardware settings
std::cout << "Hardware properties (for master process)" << std::endl
<< " Processes on node : " << comm->get_procs_per_node() << std::endl
<< " Total number of processes : " << comm->get_procs_in_world() << std::endl
<< " OpenMP threads per process : " << omp_get_max_threads() << std::endl
<< " I/O threads per process (+offset) : " << io_threads_per_process
<< " (+" << io_threads_offset << ")" << std::endl;
#ifdef HYDROGEN_HAVE_CUDA
std::cout << " GPUs on node : " << El::GPUManager::NumDevices() << std::endl;
#endif // HYDROGEN_HAVE_CUDA
std::cout << std::endl;
// Report build settings
std::cout << "Build settings" << std::endl;
std::cout << " Type : ";
#ifdef LBANN_DEBUG
std::cout << "Debug" << std::endl;
#else
std::cout << "Release" << std::endl;
#endif // LBANN_DEBUG
std::cout << " Aluminum : ";
#ifdef LBANN_HAS_ALUMINUM
std::cout << "detected" << std::endl;
#else
std::cout << "NOT detected" << std::endl;
#endif // LBANN_HAS_ALUMINUM
std::cout << " CUDA : ";
#ifdef LBANN_HAS_GPU
std::cout << "detected" << std::endl;
#else
std::cout << "NOT detected" << std::endl;
#endif // LBANN_HAS_GPU
std::cout << " cuDNN : ";
#ifdef LBANN_HAS_CUDNN
std::cout << "detected" << std::endl;
#else
std::cout << "NOT detected" << std::endl;
#endif // LBANN_HAS_CUDNN
std::cout << " CUB : ";
#ifdef HYDROGEN_HAVE_CUB
std::cout << "detected" << std::endl;
#else
std::cout << "NOT detected" << std::endl;
#endif // HYDROGEN_HAVE_CUB
std::cout << std::endl;
// Report device settings
std::cout << "GPU settings" << std::endl;
bool disable_cuda = pb_model->disable_cuda();
#ifndef LBANN_HAS_GPU
disable_cuda = true;
#endif // LBANN_HAS_GPU
std::cout << " CUDA : "
<< (disable_cuda ? "disabled" : "enabled") << std::endl;
std::cout << " cuDNN : ";
#ifdef LBANN_HAS_CUDNN
std::cout << (disable_cuda ? "disabled" : "enabled") << std::endl;
#else
std::cout << "disabled" << std::endl;
#endif // LBANN_HAS_CUDNN
const auto* env = std::getenv("MV2_USE_CUDA");
std::cout << " MV2_USE_CUDA : " << (env != nullptr ? env : "") << std::endl;
std::cout << std::endl;
#ifdef LBANN_HAS_ALUMINUM
std::cout << "Aluminum Features:" << std::endl;
std::cout << " NCCL : ";
#ifdef AL_HAS_NCCL
std::cout << "enabled" << std::endl;
#else
std::cout << "disabled" << std::endl;
#endif // AL_HAS_NCCL
std::cout << std::endl;
#endif // LBANN_HAS_ALUMINUM
// Report model settings
const auto& grid = comm->get_trainer_grid();
int procs_per_trainer = pb_model->procs_per_trainer();
std::cout << "Model settings" << std::endl
<< " Models : " << comm->get_num_trainers() << std::endl
<< " Processes per trainer : " << procs_per_trainer << std::endl
<< " Grid dimensions : " << grid.Height() << " x " << grid.Width() << std::endl;
std::cout << std::endl;
}
} // namespace lbann
| 1 | 14,089 | Why do you need to load a specific data reader in lbann_library? | LLNL-lbann | cpp |
@@ -31,10 +31,10 @@ namespace Nethermind.Runner.Ethereum.Steps
_context = context;
}
- public ValueTask Execute()
+ public Task Execute()
{
_context.ChainSpec.Bootnodes = _context.ChainSpec.Bootnodes?.Where(n => !n.NodeId?.Equals(_context.NodeKey.PublicKey) ?? false).ToArray() ?? new NetworkNode[0];
- return default;
+ return Task.CompletedTask;
}
}
} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.Linq;
using System.Threading.Tasks;
using Nethermind.Config;
using Nethermind.Runner.Ethereum.Context;
namespace Nethermind.Runner.Ethereum.Steps
{
[RunnerStepDependency(typeof(SetupKeyStore))]
public class FilterBootnodes : IStep
{
private readonly EthereumRunnerContext _context;
public FilterBootnodes(EthereumRunnerContext context)
{
_context = context;
}
public ValueTask Execute()
{
_context.ChainSpec.Bootnodes = _context.ChainSpec.Bootnodes?.Where(n => !n.NodeId?.Equals(_context.NodeKey.PublicKey) ?? false).ToArray() ?? new NetworkNode[0];
return default;
}
}
} | 1 | 23,188 | why not ValueTask? | NethermindEth-nethermind | .cs |
@@ -422,7 +422,7 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
return reconcile.Result{Requeue: true}, nil
}
- if cd.Status.InstallerImage == nil {
+ if !cd.Status.Installed && (cd.Status.InstallerImage == nil || cd.Status.CLIImage == nil) {
return r.resolveInstallerImage(cd, imageSet, releaseImage, hiveImage, cdLog)
}
| 1 | package clusterdeployment
import (
"context"
"fmt"
"os"
"reflect"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
routev1 "github.com/openshift/api/route/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
apihelpers "github.com/openshift/hive/pkg/apis/helpers"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1alpha1"
"github.com/openshift/hive/pkg/constants"
"github.com/openshift/hive/pkg/controller/images"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/imageset"
"github.com/openshift/hive/pkg/install"
)
const (
controllerName = "clusterDeployment"
serviceAccountName = "cluster-installer" // service account that can run the installer and upload artifacts to the cluster's namespace.
defaultRequeueTime = 10 * time.Second
adminSSHKeySecretKey = "ssh-publickey"
adminKubeconfigKey = "kubeconfig"
rawAdminKubeconfigKey = "raw-kubeconfig"
clusterImageSetNotFoundReason = "ClusterImageSetNotFound"
clusterImageSetFoundReason = "ClusterImageSetFound"
dnsNotReadyReason = "DNSNotReady"
dnsReadyReason = "DNSReady"
dnsReadyAnnotation = "hive.openshift.io/dnsready"
clusterDeploymentGenerationAnnotation = "hive.openshift.io/cluster-deployment-generation"
jobHashAnnotation = "hive.openshift.io/jobhash"
firstTimeInstallAnnotation = "hive.openshift.io/first-time-install"
deleteAfterAnnotation = "hive.openshift.io/delete-after" // contains a duration after which the cluster should be cleaned up.
)
var (
metricCompletedInstallJobRestarts = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_completed_install_restart",
Help: "Distribution of the number of restarts for all completed cluster installations.",
Buckets: []float64{0, 2, 10, 20, 50},
},
[]string{"cluster_type"},
)
metricInstallJobDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_install_job_duration_seconds",
Help: "Distribution of the runtime of completed install jobs.",
Buckets: []float64{60, 300, 600, 1200, 1800, 2400, 3000, 3600},
},
)
metricInstallDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_install_job_delay_seconds",
Help: "Time between cluster deployment creation and creation of the job to install/provision the cluster.",
Buckets: []float64{30, 60, 120, 300, 600, 1200, 1800},
},
)
metricImageSetDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_imageset_job_delay_seconds",
Help: "Time between cluster deployment creation and creation of the job which resolves the installer image to use for a ClusterImageSet.",
Buckets: []float64{10, 30, 60, 300, 600, 1200, 1800},
},
)
metricClustersCreated = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_created_total",
Help: "Counter incremented every time we observe a new cluster.",
},
[]string{"cluster_type"},
)
metricClustersInstalled = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_installed_total",
Help: "Counter incremented every time we observe a successful installation.",
},
[]string{"cluster_type"},
)
metricClustersDeleted = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_deleted_total",
Help: "Counter incremented every time we observe a deleted cluster.",
},
[]string{"cluster_type"},
)
metricDNSDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_dns_delay_seconds",
Help: "Time between cluster deployment with spec.manageDNS creation and the DNSZone becoming ready.",
Buckets: []float64{10, 30, 60, 300, 600, 1200, 1800},
},
)
// regex to find/replace wildcard ingress entries
// case-insensitive leading literal '*' followed by a literal '.'
wildcardDomain = regexp.MustCompile(`(?i)^\*\.`)
)
func init() {
metrics.Registry.MustRegister(metricInstallJobDuration)
metrics.Registry.MustRegister(metricCompletedInstallJobRestarts)
metrics.Registry.MustRegister(metricInstallDelaySeconds)
metrics.Registry.MustRegister(metricImageSetDelaySeconds)
metrics.Registry.MustRegister(metricClustersCreated)
metrics.Registry.MustRegister(metricClustersInstalled)
metrics.Registry.MustRegister(metricClustersDeleted)
metrics.Registry.MustRegister(metricDNSDelaySeconds)
}
// Add creates a new ClusterDeployment controller and adds it to the manager with default RBAC.
func Add(mgr manager.Manager) error {
return AddToManager(mgr, NewReconciler(mgr))
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager) reconcile.Reconciler {
return &ReconcileClusterDeployment{
Client: controllerutils.NewClientWithMetricsOrDie(mgr, controllerName),
scheme: mgr.GetScheme(),
logger: log.WithField("controller", controllerName),
remoteClusterAPIClientBuilder: controllerutils.BuildClusterAPIClientFromKubeconfig,
}
}
// AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
func AddToManager(mgr manager.Manager, r reconcile.Reconciler) error {
c, err := controller.New("clusterdeployment-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: controllerutils.GetConcurrentReconciles()})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error getting new cluster deployment")
return err
}
// Watch for changes to ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment")
return err
}
// Watch for jobs created by a ClusterDeployment:
err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment job")
return err
}
// Watch for pods created by an install job
err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestsFromMapFunc{
ToRequests: handler.ToRequestsFunc(selectorPodWatchHandler),
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment pods")
return err
}
// Watch for deprovision requests created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeprovisionRequest{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching deprovision request created by cluster deployment")
return err
}
// Watch for dnszones created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.DNSZone{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment dnszones")
return err
}
return nil
}
var _ reconcile.Reconciler = &ReconcileClusterDeployment{}
// ReconcileClusterDeployment reconciles a ClusterDeployment object
type ReconcileClusterDeployment struct {
client.Client
scheme *runtime.Scheme
logger log.FieldLogger
// remoteClusterAPIClientBuilder is a function pointer to the function that builds a client for the
// remote cluster's cluster-api
remoteClusterAPIClientBuilder func(string, string) (client.Client, error)
}
// Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read
// and what is in the ClusterDeployment.Spec
//
// Automatically generate RBAC rules to allow the Controller to read and write Deployments
//
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=serviceaccounts;secrets;configmaps;events;persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods;namespaces,verbs=get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=hive.openshift.io,resources=clusterdeployments;clusterdeployments/status;clusterdeployments/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=hive.openshift.io,resources=clusterimagesets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=hive.openshift.io,resources=clusterimagesets/status,verbs=get;update;patch
func (r *ReconcileClusterDeployment) Reconcile(request reconcile.Request) (reconcile.Result, error) {
start := time.Now()
cdLog := r.logger.WithFields(log.Fields{
"controller": controllerName,
"clusterDeployment": request.Name,
"namespace": request.Namespace,
})
// For logging, we need to see when the reconciliation loop starts and ends.
cdLog.Info("reconciling cluster deployment")
defer func() {
dur := time.Since(start)
hivemetrics.MetricControllerReconcileTime.WithLabelValues(controllerName).Observe(dur.Seconds())
cdLog.WithField("elapsed", dur).Info("reconcile complete")
}()
// Fetch the ClusterDeployment instance
cd := &hivev1.ClusterDeployment{}
err := r.Get(context.TODO(), request.NamespacedName, cd)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
cdLog.Info("cluster deployment Not Found")
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
cdLog.WithError(err).Error("Error getting cluster deployment")
return reconcile.Result{}, err
}
return r.reconcile(request, cd, cdLog)
}
func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
origCD := cd
cd = cd.DeepCopy()
// TODO: We may want to remove this fix in future.
// Handle pre-existing clusters with older status version structs that did not have the new
// cluster version mandatory fields defined.
// NOTE: removing this is causing the imageset job to fail. Please leave it in until
// we can determine what needs to be fixed.
controllerutils.FixupEmptyClusterVersionFields(&cd.Status.ClusterVersionStatus)
if !reflect.DeepEqual(origCD.Status, cd.Status) {
cdLog.Info("correcting empty cluster version fields")
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Error("error updating cluster deployment status")
return reconcile.Result{}, err
}
return reconcile.Result{
Requeue: true,
RequeueAfter: defaultRequeueTime,
}, nil
}
// We previously allowed clusterdeployment.spec.ingress[] entries to have ingress domains with a leading '*'.
// Migrate the clusterdeployment to the new format if we find a wildcard ingress domain.
// TODO: we can one day remove this once all clusterdeployment are known to have non-wildcard data
if migrateWildcardIngress(cd) {
cdLog.Info("migrating wildcard ingress entries")
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Error("failed to update cluster deployment")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// TODO: remove this once clusterdeployments have been migrated. We are no longer storing syncset status
// on clusterdeployments, remove it.
if len(cd.Status.SyncSetStatus) > 0 || len(cd.Status.SelectorSyncSetStatus) > 0 {
cd.Status.SyncSetStatus = nil
cd.Status.SelectorSyncSetStatus = nil
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Error("failed to migrate cluster deployment status")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
imageSet, modified, err := r.getClusterImageSet(cd, cdLog)
if modified || err != nil {
return reconcile.Result{}, err
}
hiveImage := r.getHiveImage(cd, imageSet, cdLog)
releaseImage := r.getReleaseImage(cd, imageSet, cdLog)
if cd.DeletionTimestamp != nil {
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
clearUnderwaySecondsMetrics(cd)
return reconcile.Result{}, nil
}
// Deprovision still underway, report metric for this cluster.
hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.DeletionTimestamp.Time).Seconds())
// If the cluster never made it to installed, make sure we clear the provisioning
// underway metric.
if !cd.Status.Installed {
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
}
return r.syncDeletedClusterDeployment(cd, hiveImage, cdLog)
}
// requeueAfter will be used to determine if cluster should be requeued after
// reconcile has completed
var requeueAfter time.Duration
// Check for the delete-after annotation, and if the cluster has expired, delete it
deleteAfter, ok := cd.Annotations[deleteAfterAnnotation]
if ok {
cdLog.Debugf("found delete after annotation: %s", deleteAfter)
dur, err := time.ParseDuration(deleteAfter)
if err != nil {
return reconcile.Result{}, fmt.Errorf("error parsing %s as a duration: %v", deleteAfterAnnotation, err)
}
if !cd.CreationTimestamp.IsZero() {
expiry := cd.CreationTimestamp.Add(dur)
cdLog.Debugf("cluster expires at: %s", expiry)
if time.Now().After(expiry) {
cdLog.WithField("expiry", expiry).Info("cluster has expired, issuing delete")
err := r.Delete(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Error("error deleting expired cluster")
}
return reconcile.Result{}, err
}
// We have an expiry time but we're not expired yet. Set requeueAfter for just after expiry time
// so that we requeue cluster for deletion once reconcile has completed
requeueAfter = expiry.Sub(time.Now()) + 60*time.Second
}
}
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
cdLog.Debugf("adding clusterdeployment finalizer")
if err := r.addClusterDeploymentFinalizer(cd); err != nil {
cdLog.WithError(err).Error("error adding finalizer")
return reconcile.Result{}, err
}
metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return reconcile.Result{}, nil
}
cdLog.Debug("loading SSH key secret")
if cd.Spec.SSHKey == nil {
cdLog.Error("cluster has no ssh key set, unable to launch install")
return reconcile.Result{}, fmt.Errorf("cluster has no ssh key set, unable to launch install")
}
sshKey, err := controllerutils.LoadSecretData(r.Client, cd.Spec.SSHKey.Name,
cd.Namespace, adminSSHKeySecretKey)
if err != nil {
cdLog.WithError(err).Error("unable to load ssh key from secret")
return reconcile.Result{}, err
}
cdLog.Debug("loading pull secrets")
pullSecret, err := r.mergePullSecrets(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("Error merging pull secrets")
return reconcile.Result{}, err
}
// Update the pull secret object if required
modifiedCD, err := r.updatePullSecretInfo(pullSecret, cd, cdLog)
if err != nil || modifiedCD {
if err != nil {
cdLog.WithError(err).Error("Error updating the merged pull secret")
return reconcile.Result{}, err
}
// Because the global pull secret is not referenced on our cluster deployment,
// generating it does not cause an automatic reconcile. Manually requeue to avoid
// waiting 30 minutes before the cluster install proceeds.
return reconcile.Result{Requeue: true}, nil
}
if cd.Status.InstallerImage == nil {
return r.resolveInstallerImage(cd, imageSet, releaseImage, hiveImage, cdLog)
}
if cd.Spec.ManageDNS {
managedDNSZoneAvailable, dnsZone, err := r.ensureManagedDNSZone(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
modified, err := r.setDNSNotReadyCondition(cd, managedDNSZoneAvailable, cdLog)
if modified || err != nil {
return reconcile.Result{}, err
}
if !managedDNSZoneAvailable {
// The clusterdeployment will be queued when the owned DNSZone's status
// is updated to available.
cdLog.Debug("DNSZone is not yet available. Waiting for zone to become available.")
return reconcile.Result{}, nil
}
updated, err := r.setDNSDelayMetric(cd, dnsZone, cdLog)
if updated || err != nil {
return reconcile.Result{}, err
}
}
// firstInstalledObserve is the flag that is used for reporting the provision job duration metric
firstInstalledObserve := false
containerRestarts := 0
// Check if an install job already exists:
existingJob := &batchv1.Job{}
installJobName := install.GetInstallJobName(cd)
err = r.Get(context.TODO(), types.NamespacedName{Name: installJobName, Namespace: cd.Namespace}, existingJob)
if err != nil {
if apierrors.IsNotFound(err) {
cdLog.Debug("no install job exists")
existingJob = nil
} else {
cdLog.WithError(err).Error("error looking for install job")
return reconcile.Result{}, err
}
} else {
if !existingJob.DeletionTimestamp.IsZero() {
cdLog.WithError(err).Error("install job is being deleted, requeueing to wait for deletion")
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
}
// setting the flag so that we can report the metric after cd is installed
if existingJob.Status.Succeeded > 0 && !cd.Status.Installed {
firstInstalledObserve = true
}
}
if cd.Status.Installed {
cdLog.Debug("cluster is already installed, no processing of install job needed")
r.cleanupInstallLogPVC(cd, cdLog)
} else {
// Indicate that the cluster is still installing:
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.CreationTimestamp.Time).Seconds())
skipGatherLogs := os.Getenv(constants.SkipGatherLogsEnvVar) == "true"
if !skipGatherLogs {
if err := r.createPVC(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
}
job, err := install.GenerateInstallerJob(
cd,
hiveImage,
releaseImage,
serviceAccountName,
sshKey, GetInstallLogsPVCName(cd), skipGatherLogs)
if err != nil {
cdLog.WithError(err).Error("error generating install job")
return reconcile.Result{}, err
}
jobHash, err := controllerutils.CalculateJobSpecHash(job)
if err != nil {
cdLog.WithError(err).Error("failed to calculate hash for generated install job")
return reconcile.Result{}, err
}
if job.Annotations == nil {
job.Annotations = map[string]string{}
}
job.Annotations[jobHashAnnotation] = jobHash
if err = controllerutil.SetControllerReference(cd, job, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on job")
return reconcile.Result{}, err
}
cdLog = cdLog.WithField("job", job.Name)
if existingJob == nil {
cdLog.Infof("creating install job")
_, err = controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog)
if err != nil {
cdLog.WithError(err).Error("error setting up service account and role")
return reconcile.Result{}, err
}
err = r.Create(context.TODO(), job)
if err != nil {
cdLog.Errorf("error creating job: %v", err)
return reconcile.Result{}, err
}
if _, ok := cd.Annotations[firstTimeInstallAnnotation]; !ok {
initializeAnnotations(cd)
// Add the annotation for first time install
cd.Annotations[firstTimeInstallAnnotation] = "true"
if err := r.Client.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Error("failed to save annotation for firstTimeInstall")
}
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to install job seconds")
metricInstallDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
}
} else {
cdLog.Debug("provision job exists")
containerRestarts, err = r.calcInstallPodRestarts(cd, cdLog)
if err != nil {
// Metrics calculation should not shut down reconciliation, logging and moving on.
cdLog.WithError(err).Warn("error listing pods, unable to calculate pod restarts but continuing")
} else {
if containerRestarts > 0 {
cdLog.WithFields(log.Fields{
"restarts": containerRestarts,
}).Warn("install pod has restarted")
}
// Store the restart count on the cluster deployment status.
cd.Status.InstallRestarts = containerRestarts
}
if existingJob.Annotations != nil {
didGenerationChange, err := r.updateOutdatedConfigurations(cd.Generation, existingJob, cdLog)
if didGenerationChange || err != nil {
return reconcile.Result{}, err
}
}
jobDeleted, err := r.deleteJobOnHashChange(existingJob, job, cdLog)
if jobDeleted || err != nil {
return reconcile.Result{}, err
}
}
if firstInstalledObserve && cd.Status.InstalledTimestamp == nil {
now := metav1.Now()
cd.Status.InstalledTimestamp = &now
}
}
err = r.updateClusterDeploymentStatus(cd, origCD, existingJob, cdLog)
if err != nil {
cdLog.WithError(err).Errorf("error updating cluster deployment status")
return reconcile.Result{}, err
}
// firstInstalledObserve will be true if this is the first time we've noticed the install job completed.
// If true, we know we can report the metrics associated with a completed job.
if firstInstalledObserve {
// jobDuration calculates the time elapsed since the install job started
jobDuration := existingJob.Status.CompletionTime.Time.Sub(existingJob.Status.StartTime.Time)
cdLog.WithField("duration", jobDuration.Seconds()).Debug("install job completed")
metricInstallJobDuration.Observe(float64(jobDuration.Seconds()))
// Report a metric for the total number of container restarts:
metricCompletedInstallJobRestarts.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).
Observe(float64(containerRestarts))
// Clear the install underway seconds metric. After this no-one should be reporting
// this metric for this cluster.
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
metricClustersInstalled.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
}
// Check for requeueAfter duration
if requeueAfter != 0 {
cdLog.Debugf("cluster will re-sync due to expiry time in: %v", requeueAfter)
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}
return reconcile.Result{}, nil
}
// GetInstallLogsPVCName returns the expected name of the persistent volume claim for cluster install failure logs.
func GetInstallLogsPVCName(cd *hivev1.ClusterDeployment) string {
return apihelpers.GetResourceName(cd.Name, "install-logs")
}
// createPVC will create the PVC for the install logs if it does not already exist.
func (r *ReconcileClusterDeployment) createPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
pvcName := GetInstallLogsPVCName(cd)
switch err := r.Get(context.TODO(), types.NamespacedName{Name: pvcName, Namespace: cd.Namespace}, &corev1.PersistentVolumeClaim{}); {
case err == nil:
cdLog.Debug("pvc already exists")
return nil
case !apierrors.IsNotFound(err):
cdLog.WithError(err).Error("error getting persistent volume claim")
return err
}
labels := map[string]string{
constants.InstallJobLabel: "true",
constants.ClusterDeploymentNameLabel: cd.Name,
}
if cd.Labels != nil {
typeStr, ok := cd.Labels[hivev1.HiveClusterTypeLabel]
if ok {
labels[hivev1.HiveClusterTypeLabel] = typeStr
}
}
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: cd.Namespace,
Labels: labels,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}
cdLog.WithField("pvc", pvc.Name).Info("creating persistent volume claim")
if err := controllerutil.SetControllerReference(cd, pvc, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on pvc")
return err
}
err := r.Create(context.TODO(), pvc)
if err != nil {
cdLog.WithError(err).Error("error creating pvc")
}
return err
}
// getHiveImage looks for a Hive image to use in clusterdeployment jobs in the following order:
// 1 - specified in the cluster deployment spec.images.hiveImage
// 2 - referenced in the cluster deployment spec.imageSet
// 3 - specified via environment variable to the hive controller
// 4 - fallback default hardcoded image reference
func (r *ReconcileClusterDeployment) getHiveImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, cdLog log.FieldLogger) string {
if cd.Spec.Images.HiveImage != "" {
return cd.Spec.Images.HiveImage
}
if imageSet != nil && imageSet.Spec.HiveImage != nil {
return *imageSet.Spec.HiveImage
}
return images.GetHiveImage(cdLog)
}
// getReleaseImage looks for a release image in the clusterdeployment or its corresponding imageset in the following order:
// 1 - specified in the cluster deployment spec.images.releaseImage
// 2 - referenced in the cluster deployment spec.imageSet
func (r *ReconcileClusterDeployment) getReleaseImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, cdLog log.FieldLogger) string {
if cd.Spec.Images.ReleaseImage != "" {
return cd.Spec.Images.ReleaseImage
}
if imageSet != nil && imageSet.Spec.ReleaseImage != nil {
return *imageSet.Spec.ReleaseImage
}
return ""
}
func (r *ReconcileClusterDeployment) getClusterImageSet(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.ClusterImageSet, bool, error) {
if cd.Spec.ImageSet == nil || len(cd.Spec.ImageSet.Name) == 0 {
return nil, false, nil
}
imageSet := &hivev1.ClusterImageSet{}
err := r.Get(context.TODO(), types.NamespacedName{Name: cd.Spec.ImageSet.Name}, imageSet)
switch {
case apierrors.IsNotFound(err):
cdLog.WithField("clusterimageset", cd.Spec.ImageSet.Name).Warning("clusterdeployment references non-existent clusterimageset")
modified, err := r.setImageSetNotFoundCondition(cd, false, cdLog)
return nil, modified, err
case err != nil:
cdLog.WithError(err).WithField("clusterimageset", cd.Spec.ImageSet.Name).Error("unexpected error retrieving clusterimageset")
return nil, false, err
default:
return imageSet, false, nil
}
}
func (r *ReconcileClusterDeployment) statusUpdate(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Error("cannot update clusterdeployment status")
}
return err
}
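// resolveInstallerImage sets status.installerImage from the cluster deployment spec or the
// referenced ClusterImageSet when either specifies it; otherwise it launches (or waits on) an
// imageset job that resolves the installer image from the release image.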
func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, releaseImage, hiveImage string, cdLog log.FieldLogger) (reconcile.Result, error) {
if len(cd.Spec.Images.InstallerImage) > 0 {
cdLog.WithField("image", cd.Spec.Images.InstallerImage).
Debug("setting status.InstallerImage to the value in spec.images.installerImage")
cd.Status.InstallerImage = &cd.Spec.Images.InstallerImage
return reconcile.Result{}, r.statusUpdate(cd, cdLog)
}
if imageSet != nil && imageSet.Spec.InstallerImage != nil {
cd.Status.InstallerImage = imageSet.Spec.InstallerImage
cdLog.WithField("imageset", imageSet.Name).Debug("setting status.InstallerImage using imageSet.Spec.InstallerImage")
return reconcile.Result{}, r.statusUpdate(cd, cdLog)
}
cliImage := images.GetCLIImage(cdLog)
job := imageset.GenerateImageSetJob(cd, releaseImage, serviceAccountName, imageset.AlwaysPullImage(cliImage), imageset.AlwaysPullImage(hiveImage))
if err := controllerutil.SetControllerReference(cd, job, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on job")
return reconcile.Result{}, err
}
jobName := types.NamespacedName{Name: job.Name, Namespace: job.Namespace}
jobLog := cdLog.WithField("job", jobName)
existingJob := &batchv1.Job{}
err := r.Get(context.TODO(), jobName, existingJob)
switch {
// If the job exists but is in the process of getting deleted, requeue and wait for the delete
// to complete.
case err == nil && !job.DeletionTimestamp.IsZero():
jobLog.Debug("imageset job is being deleted. Will recreate once deleted")
return reconcile.Result{RequeueAfter: defaultRequeueTime}, err
// If job exists and is finished, delete so we can recreate it
case err == nil && controllerutils.IsFinished(existingJob):
jobLog.WithField("successful", controllerutils.IsSuccessful(existingJob)).
Warning("Finished job found, but installer image is not yet resolved. Deleting.")
err := r.Delete(context.Background(), existingJob,
client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
jobLog.WithError(err).Error("cannot delete imageset job")
}
return reconcile.Result{}, err
case apierrors.IsNotFound(err):
jobLog.WithField("releaseImage", releaseImage).Info("creating imageset job")
_, err = controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog)
if err != nil {
cdLog.WithError(err).Error("error setting up service account and role")
return reconcile.Result{}, err
}
err = r.Create(context.TODO(), job)
if err != nil {
jobLog.WithError(err).Error("error creating job")
} else {
// kickstartDuration calculates the delay between creation of cd and start of imageset job
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to imageset job seconds")
metricImageSetDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
}
return reconcile.Result{}, err
case err != nil:
jobLog.WithError(err).Error("cannot get job")
return reconcile.Result{}, err
default:
jobLog.Debug("job exists and is in progress")
}
return reconcile.Result{}, nil
}
func (r *ReconcileClusterDeployment) setDNSNotReadyCondition(cd *hivev1.ClusterDeployment, isReady bool, cdLog log.FieldLogger) (modified bool, err error) {
original := cd.DeepCopy()
status := corev1.ConditionFalse
reason := dnsReadyReason
message := "DNS Zone available"
if !isReady {
status = corev1.ConditionTrue
reason = dnsNotReadyReason
message = "DNS Zone not yet available"
}
cd.Status.Conditions = controllerutils.SetClusterDeploymentCondition(
cd.Status.Conditions,
hivev1.DNSNotReadyCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever)
if !reflect.DeepEqual(original.Status.Conditions, cd.Status.Conditions) {
cdLog.Debugf("setting DNSNotReadyCondition to %v", status)
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Error("cannot update status conditions")
}
return true, err
}
return false, nil
}
func (r *ReconcileClusterDeployment) setImageSetNotFoundCondition(cd *hivev1.ClusterDeployment, isNotFound bool, cdLog log.FieldLogger) (modified bool, err error) {
original := cd.DeepCopy()
status := corev1.ConditionFalse
reason := clusterImageSetFoundReason
message := fmt.Sprintf("ClusterImageSet %s is available", cd.Spec.ImageSet.Name)
if isNotFound {
status = corev1.ConditionTrue
reason = clusterImageSetNotFoundReason
message = fmt.Sprintf("ClusterImageSet %s is not available", cd.Spec.ImageSet.Name)
}
cd.Status.Conditions = controllerutils.SetClusterDeploymentCondition(
cd.Status.Conditions,
hivev1.ClusterImageSetNotFoundCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever)
if !reflect.DeepEqual(original.Status.Conditions, cd.Status.Conditions) {
cdLog.Info("setting ClusterImageSetNotFoundCondition to %v", status)
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Error("cannot update status conditions")
}
return true, err
}
return false, nil
}
// updateOutdatedConfigurations deletes the install job if it exists and the generation recorded
// in its annotations is older than the cluster deployment's generation.
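// Illustrative sketch, not from the original source: an install job created while the
// ClusterDeployment was at metadata.generation 3 carries the annotation
// "hive.openshift.io/cluster-deployment-generation": "3". If a later spec edit bumps the
// generation to 4, the recorded value (3) is less than cdGeneration (4), so the job is deleted
// and the next reconcile creates a fresh one with the updated spec. The generation numbers here
// are hypothetical.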
func (r *ReconcileClusterDeployment) updateOutdatedConfigurations(cdGeneration int64, existingJob *batchv1.Job, cdLog log.FieldLogger) (bool, error) {
var err error
var didGenerationChange bool
if jobGeneration, ok := existingJob.Annotations[clusterDeploymentGenerationAnnotation]; ok {
convertedJobGeneration, _ := strconv.ParseInt(jobGeneration, 10, 64)
if convertedJobGeneration < cdGeneration {
didGenerationChange = true
cdLog.Info("deleting outdated install job due to cluster deployment generation change")
err = r.Delete(context.TODO(), existingJob, client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
cdLog.WithError(err).Errorf("error deleting outdated install job")
return didGenerationChange, err
}
}
}
return didGenerationChange, err
}
func (r *ReconcileClusterDeployment) updateClusterDeploymentStatus(cd *hivev1.ClusterDeployment, origCD *hivev1.ClusterDeployment, job *batchv1.Job, cdLog log.FieldLogger) error {
cdLog.Debug("updating cluster deployment status")
if job != nil && job.Name != "" && job.Namespace != "" {
		// Job exists, check its status:
cd.Status.Installed = controllerutils.IsSuccessful(job)
}
// The install manager sets this secret name, but we don't consider it a critical failure and
// will attempt to heal it here, as the value is predictable.
if cd.Status.Installed && cd.Status.AdminKubeconfigSecret.Name == "" {
cd.Status.AdminKubeconfigSecret = corev1.LocalObjectReference{Name: apihelpers.GetResourceName(cd.Name, "admin-kubeconfig")}
}
if cd.Status.AdminKubeconfigSecret.Name != "" {
adminKubeconfigSecret := &corev1.Secret{}
err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Status.AdminKubeconfigSecret.Name}, adminKubeconfigSecret)
if err != nil {
if apierrors.IsNotFound(err) {
log.Warn("admin kubeconfig does not yet exist")
} else {
return err
}
} else {
err = r.fixupAdminKubeconfigSecret(adminKubeconfigSecret, cdLog)
if err != nil {
return err
}
err = r.setAdminKubeconfigStatus(cd, adminKubeconfigSecret, cdLog)
if err != nil {
return err
}
}
}
// Update cluster deployment status if changed:
if !reflect.DeepEqual(cd.Status, origCD.Status) {
cdLog.Infof("status has changed, updating cluster deployment")
cdLog.Debugf("orig: %v", origCD)
cdLog.Debugf("new : %v", cd.Status)
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.Errorf("error updating cluster deployment: %v", err)
return err
}
} else {
cdLog.Debug("cluster deployment status unchanged")
}
return nil
}
func (r *ReconcileClusterDeployment) fixupAdminKubeconfigSecret(secret *corev1.Secret, cdLog log.FieldLogger) error {
originalSecret := secret.DeepCopy()
rawData, hasRawData := secret.Data[rawAdminKubeconfigKey]
if !hasRawData {
secret.Data[rawAdminKubeconfigKey] = secret.Data[adminKubeconfigKey]
rawData = secret.Data[adminKubeconfigKey]
}
var err error
secret.Data[adminKubeconfigKey], err = controllerutils.FixupKubeconfig(rawData)
if err != nil {
cdLog.WithError(err).Errorf("cannot fixup kubeconfig to generate new one")
return err
}
if reflect.DeepEqual(originalSecret.Data, secret.Data) {
cdLog.Debug("secret data has not changed, no need to update")
return nil
}
err = r.Update(context.TODO(), secret)
if err != nil {
cdLog.WithError(err).Error("error updated admin kubeconfig secret")
return err
}
return nil
}
// setAdminKubeconfigStatus sets all cluster status fields that depend on the admin kubeconfig.
func (r *ReconcileClusterDeployment) setAdminKubeconfigStatus(cd *hivev1.ClusterDeployment, adminKubeconfigSecret *corev1.Secret, cdLog log.FieldLogger) error {
if cd.Status.WebConsoleURL == "" || cd.Status.APIURL == "" {
remoteClusterAPIClient, err := r.remoteClusterAPIClientBuilder(string(adminKubeconfigSecret.Data[adminKubeconfigKey]), controllerName)
if err != nil {
cdLog.WithError(err).Error("error building remote cluster-api client connection")
return err
}
// Parse the admin kubeconfig for the server URL:
config, err := clientcmd.Load(adminKubeconfigSecret.Data["kubeconfig"])
if err != nil {
return err
}
cluster, ok := config.Clusters[cd.Spec.ClusterName]
if !ok {
return fmt.Errorf("error parsing admin kubeconfig secret data")
}
// We should be able to assume only one cluster in here:
server := cluster.Server
cdLog.Debugf("found cluster API URL in kubeconfig: %s", server)
cd.Status.APIURL = server
routeObject := &routev1.Route{}
err = remoteClusterAPIClient.Get(context.Background(),
types.NamespacedName{Namespace: "openshift-console", Name: "console"}, routeObject)
if err != nil {
cdLog.WithError(err).Error("error fetching remote route object")
return err
}
cdLog.Debugf("read remote route object: %s", routeObject)
cd.Status.WebConsoleURL = "https://" + routeObject.Spec.Host
}
return nil
}
// ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone
// linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted.
// Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it
// not working as intended.
func (r *ReconcileClusterDeployment) ensureManagedDNSZoneDeleted(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*reconcile.Result, error) {
if !cd.Spec.ManageDNS {
return nil, nil
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: dnsZoneName(cd.Name)}
err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone)
if err != nil && !apierrors.IsNotFound(err) {
cdLog.WithError(err).Error("error looking up managed dnszone")
return &reconcile.Result{}, err
}
if apierrors.IsNotFound(err) || !dnsZone.DeletionTimestamp.IsZero() {
cdLog.Debug("dnszone has been deleted or is getting deleted")
return nil, nil
}
cdLog.Warn("managed dnszone did not get a deletionTimestamp when parent cluster deployment was deleted, deleting manually")
err = r.Delete(context.TODO(), dnsZone,
client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
cdLog.WithError(err).Error("error deleting managed dnszone")
}
return &reconcile.Result{}, err
}
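// syncDeletedClusterDeployment handles a cluster deployment that has a deletion timestamp: it
// ensures the managed DNS zone and any running install job are cleaned up, creates a
// ClusterDeprovisionRequest unless PreserveOnDelete applies to an installed cluster, and removes
// the deprovision finalizer once deprovisioning has completed or is not needed.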
func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.ClusterDeployment, hiveImage string, cdLog log.FieldLogger) (reconcile.Result, error) {
result, err := r.ensureManagedDNSZoneDeleted(cd, cdLog)
if result != nil {
return *result, err
}
if err != nil {
return reconcile.Result{}, err
}
// Delete the install job in case it's still running:
installJob := &batchv1.Job{}
err = r.Get(context.Background(),
types.NamespacedName{
Name: install.GetInstallJobName(cd),
Namespace: cd.Namespace,
},
installJob)
if err != nil && apierrors.IsNotFound(err) {
cdLog.Debug("install job no longer exists, nothing to cleanup")
} else if err != nil {
cdLog.WithError(err).Errorf("error getting existing install job for deleted cluster deployment")
return reconcile.Result{}, err
} else if !installJob.DeletionTimestamp.IsZero() {
cdLog.WithField("finalizers", installJob.Finalizers).Info("install job is being deleted, requeueing to wait for deletion")
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
} else {
err = r.Delete(context.Background(), installJob,
client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
cdLog.WithError(err).Errorf("error deleting existing install job for deleted cluster deployment")
return reconcile.Result{}, err
}
cdLog.WithField("jobName", installJob.Name).Info("install job deleted")
return reconcile.Result{}, nil
}
// Skips creation of deprovision request if PreserveOnDelete is true and cluster is installed
if cd.Spec.PreserveOnDelete {
if cd.Status.Installed {
cdLog.Warn("skipping creation of deprovisioning request for installed cluster due to PreserveOnDelete=true")
if controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Error("error removing finalizer")
}
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// Overriding PreserveOnDelete because we might have deleted the cluster deployment before it finished
// installing, which can cause AWS resources to leak
cdLog.Infof("PreserveOnDelete=true but creating deprovisioning request as cluster was never successfully provisioned")
}
if cd.Status.InfraID == "" {
cdLog.Warn("skipping uninstall for cluster that never had clusterID set")
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Error("error removing finalizer")
}
return reconcile.Result{}, err
}
// Generate a deprovision request
request := generateDeprovisionRequest(cd)
err = controllerutil.SetControllerReference(cd, request, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on deprovision request: %v", err)
return reconcile.Result{}, err
}
// Check if deprovision request already exists:
existingRequest := &hivev1.ClusterDeprovisionRequest{}
err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Name, Namespace: cd.Namespace}, existingRequest)
if err != nil && apierrors.IsNotFound(err) {
cdLog.Infof("creating deprovision request for cluster deployment")
err = r.Create(context.TODO(), request)
if err != nil {
cdLog.WithError(err).Errorf("error creating deprovision request")
// Check if namespace is terminated, if so we can give up, remove the finalizer, and let
// the cluster go away.
ns := &corev1.Namespace{}
err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Namespace}, ns)
if err != nil {
cdLog.WithError(err).Error("error checking for deletionTimestamp on namespace")
return reconcile.Result{}, err
}
if ns.DeletionTimestamp != nil {
cdLog.Warn("detected a namespace deleted before deprovision request could be created, giving up on deprovision and removing finalizer")
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Error("error removing finalizer")
}
}
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
} else if err != nil {
cdLog.WithError(err).Errorf("error getting deprovision request")
return reconcile.Result{}, err
}
// Deprovision request exists, check whether it has completed
if existingRequest.Status.Completed {
cdLog.Infof("deprovision request completed, removing finalizer")
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Error("error removing finalizer")
}
return reconcile.Result{}, err
}
cdLog.Debug("deprovision request not yet completed")
return reconcile.Result{}, nil
}
func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
cd = cd.DeepCopy()
controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision)
return r.Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
cd = cd.DeepCopy()
controllerutils.DeleteFinalizer(cd, hivev1.FinalizerDeprovision)
err := r.Update(context.TODO(), cd)
if err == nil {
clearUnderwaySecondsMetrics(cd)
// Increment the clusters deleted counter:
metricClustersDeleted.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
}
return err
}
// setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation
// to when the dnszone became ready, and set a metric to report the delay.
// Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered.
func (r *ReconcileClusterDeployment) setDNSDelayMetric(cd *hivev1.ClusterDeployment, dnsZone *hivev1.DNSZone, cdLog log.FieldLogger) (bool, error) {
modified := false
initializeAnnotations(cd)
if _, ok := cd.Annotations[dnsReadyAnnotation]; ok {
// already have recorded the dnsdelay metric
return modified, nil
}
readyTimestamp := dnsReadyTransitionTime(dnsZone)
if readyTimestamp == nil {
msg := "did not find timestamp for when dnszone became ready"
cdLog.WithField("dnszone", dnsZone.Name).Error(msg)
return modified, fmt.Errorf(msg)
}
dnsDelayDuration := readyTimestamp.Sub(cd.CreationTimestamp.Time)
cdLog.WithField("duration", dnsDelayDuration.Seconds()).Info("DNS ready")
cd.Annotations[dnsReadyAnnotation] = dnsDelayDuration.String()
if err := r.Client.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Error("failed to save annotation marking DNS becoming ready")
return modified, err
}
modified = true
metricDNSDelaySeconds.Observe(float64(dnsDelayDuration.Seconds()))
return modified, nil
}
func (r *ReconcileClusterDeployment) ensureManagedDNSZone(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, *hivev1.DNSZone, error) {
// for now we only support AWS
if cd.Spec.AWS == nil || cd.Spec.PlatformSecrets.AWS == nil {
cdLog.Error("cluster deployment platform is not AWS, cannot manage DNS zone")
return false, nil, fmt.Errorf("only AWS managed DNS is supported")
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: dnsZoneName(cd.Name)}
logger := cdLog.WithField("zone", dnsZoneNamespacedName.String())
err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone)
if err == nil {
availableCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
return availableCondition != nil && availableCondition.Status == corev1.ConditionTrue, dnsZone, nil
}
if apierrors.IsNotFound(err) {
logger.Info("creating new DNSZone for cluster deployment")
return false, nil, r.createManagedDNSZone(cd, logger)
}
logger.WithError(err).Error("failed to fetch DNS zone")
return false, nil, err
}
func (r *ReconcileClusterDeployment) createManagedDNSZone(cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
dnsZone := &hivev1.DNSZone{
ObjectMeta: metav1.ObjectMeta{
Name: dnsZoneName(cd.Name),
Namespace: cd.Namespace,
},
Spec: hivev1.DNSZoneSpec{
Zone: cd.Spec.BaseDomain,
LinkToParentDomain: true,
AWS: &hivev1.AWSDNSZoneSpec{
AccountSecret: cd.Spec.PlatformSecrets.AWS.Credentials,
Region: cd.Spec.AWS.Region,
},
},
}
for k, v := range cd.Spec.AWS.UserTags {
dnsZone.Spec.AWS.AdditionalTags = append(dnsZone.Spec.AWS.AdditionalTags, hivev1.AWSResourceTag{Key: k, Value: v})
}
if err := controllerutil.SetControllerReference(cd, dnsZone, r.scheme); err != nil {
logger.WithError(err).Error("error setting controller reference on dnszone")
return err
}
err := r.Create(context.TODO(), dnsZone)
if err != nil {
logger.WithError(err).Error("cannot create DNS zone")
return err
}
logger.Info("dns zone created")
return nil
}
func dnsZoneName(cdName string) string {
return apihelpers.GetResourceName(cdName, "zone")
}
func selectorPodWatchHandler(a handler.MapObject) []reconcile.Request {
retval := []reconcile.Request{}
pod := a.Object.(*corev1.Pod)
if pod == nil {
// Wasn't a Pod, bail out. This should not happen.
log.Errorf("Error converting MapObject.Object to Pod. Value: %+v", a.Object)
return retval
}
if pod.Labels == nil {
return retval
}
cdName, ok := pod.Labels[constants.ClusterDeploymentNameLabel]
if !ok {
return retval
}
retval = append(retval, reconcile.Request{NamespacedName: types.NamespacedName{
Name: cdName,
Namespace: pod.Namespace,
}})
return retval
}
func (r *ReconcileClusterDeployment) calcInstallPodRestarts(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (int, error) {
installerPodLabels := map[string]string{constants.ClusterDeploymentNameLabel: cd.Name, constants.InstallJobLabel: "true"}
pods := &corev1.PodList{}
err := r.Client.List(context.Background(), pods, client.InNamespace(cd.Namespace), client.MatchingLabels(installerPodLabels))
if err != nil {
return 0, err
}
if len(pods.Items) > 1 {
log.Warnf("found %d install pods for cluster", len(pods.Items))
}
// Calculate restarts across all containers in the pod:
containerRestarts := 0
for _, pod := range pods.Items {
for _, cs := range pod.Status.ContainerStatuses {
containerRestarts += int(cs.RestartCount)
}
}
return containerRestarts, nil
}
func (r *ReconcileClusterDeployment) deleteJobOnHashChange(existingJob, generatedJob *batchv1.Job, cdLog log.FieldLogger) (bool, error) {
newJobNeeded := false
if _, ok := existingJob.Annotations[jobHashAnnotation]; !ok {
// this job predates tracking the job hash, so assume we need a new job
newJobNeeded = true
}
if existingJob.Annotations[jobHashAnnotation] != generatedJob.Annotations[jobHashAnnotation] {
// delete the job so we get a fresh one with the new job spec
newJobNeeded = true
}
if newJobNeeded {
// delete the existing job
cdLog.Info("deleting existing install job due to updated/missing hash detected")
err := r.Delete(context.TODO(), existingJob, client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
cdLog.WithError(err).Errorf("error deleting outdated install job")
return newJobNeeded, err
}
}
return newJobNeeded, nil
}
// cleanupInstallLogPVC will immediately delete the PVC (should it exist) if the cluster was installed successfully, without retries.
// If there were retries, it will delete the PVC if it has been more than 7 days since the job was completed.
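// Illustrative sketch, not from the original source: a cluster that installed with
// Status.InstallRestarts == 0 has its PVC deleted on the next reconcile, while a cluster whose
// install restarted and whose InstalledTimestamp is 2019-01-01 keeps the PVC until roughly
// 2019-01-08; the dates here are hypothetical.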
func (r *ReconcileClusterDeployment) cleanupInstallLogPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
if !cd.Status.Installed {
return nil
}
pvc := &corev1.PersistentVolumeClaim{}
err := r.Get(context.TODO(), types.NamespacedName{Name: GetInstallLogsPVCName(cd), Namespace: cd.Namespace}, pvc)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
cdLog.WithError(err).Error("error looking up install logs PVC")
return err
}
pvcLog := cdLog.WithField("pvc", pvc.Name)
if cd.Status.InstallRestarts == 0 {
pvcLog.Info("deleting logs PersistentVolumeClaim for installed cluster with no restarts")
if err := r.Delete(context.TODO(), pvc); err != nil {
pvcLog.WithError(err).Error("error deleting install logs PVC")
return err
}
return nil
}
if cd.Status.InstalledTimestamp == nil {
pvcLog.Warn("deleting logs PersistentVolumeClaim for cluster with errors but no installed timestamp")
if err := r.Delete(context.TODO(), pvc); err != nil {
pvcLog.WithError(err).Error("error deleting install logs PVC")
return err
}
return nil
}
// Otherwise, delete if more than 7 days have passed.
if time.Since(cd.Status.InstalledTimestamp.Time) > (7 * 24 * time.Hour) {
pvcLog.Info("deleting logs PersistentVolumeClaim for cluster that was installed after restarts more than 7 days ago")
if err := r.Delete(context.TODO(), pvc); err != nil {
pvcLog.WithError(err).Error("error deleting install logs PVC")
return err
}
return nil
}
cdLog.WithField("pvc", pvc.Name).Debug("preserving logs PersistentVolumeClaim for cluster with install restarts for 7 days")
return nil
}
func generateDeprovisionRequest(cd *hivev1.ClusterDeployment) *hivev1.ClusterDeprovisionRequest {
req := &hivev1.ClusterDeprovisionRequest{
ObjectMeta: metav1.ObjectMeta{
Name: cd.Name,
Namespace: cd.Namespace,
},
Spec: hivev1.ClusterDeprovisionRequestSpec{
InfraID: cd.Status.InfraID,
ClusterID: cd.Status.ClusterID,
Platform: hivev1.ClusterDeprovisionRequestPlatform{
AWS: &hivev1.AWSClusterDeprovisionRequest{},
},
},
}
if cd.Spec.Platform.AWS != nil {
req.Spec.Platform.AWS.Region = cd.Spec.Platform.AWS.Region
}
if cd.Spec.PlatformSecrets.AWS != nil {
req.Spec.Platform.AWS.Credentials = &cd.Spec.PlatformSecrets.AWS.Credentials
}
return req
}
func generatePullSecretObj(pullSecret string, pullSecretName string, cd *hivev1.ClusterDeployment) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName,
Namespace: cd.Namespace,
},
Type: corev1.SecretTypeDockerConfigJson,
StringData: map[string]string{
corev1.DockerConfigJsonKey: pullSecret,
},
}
}
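// migrateWildcardIngress strips a leading wildcard from each ingress domain and returns true
// when at least one entry was rewritten; e.g. a hypothetical "*.apps.example.com" becomes
// "apps.example.com" (illustrative example, the domain is an assumption, not from the source).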
func migrateWildcardIngress(cd *hivev1.ClusterDeployment) bool {
migrated := false
for i, ingress := range cd.Spec.Ingress {
newIngress := wildcardDomain.ReplaceAllString(ingress.Domain, "")
if newIngress != ingress.Domain {
cd.Spec.Ingress[i].Domain = newIngress
migrated = true
}
}
return migrated
}
func dnsReadyTransitionTime(dnsZone *hivev1.DNSZone) *time.Time {
readyCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
if readyCondition != nil && readyCondition.Status == corev1.ConditionTrue {
return &readyCondition.LastTransitionTime.Time
}
return nil
}
func strPtr(s string) *string {
return &s
}
func clearUnderwaySecondsMetrics(cd *hivev1.ClusterDeployment) {
// If we've successfully cleared the deprovision finalizer we know this is a good time to
// reset the underway metric to 0, after which it will no longer be reported.
hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
// Clear the install underway seconds metric if this cluster was still installing.
if !cd.Status.Installed {
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
}
}
// initializeAnnotations initializes the ClusterDeployment's annotations map if it has not been initialized already.
func initializeAnnotations(cd *hivev1.ClusterDeployment) {
if cd.Annotations == nil {
cd.Annotations = map[string]string{}
}
}
// mergePullSecrets merges the global pull secret JSON (if defined) with the cluster's pull secret JSON (if defined)
// An error will be returned if neither is defined
func (r *ReconcileClusterDeployment) mergePullSecrets(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (string, error) {
var localPullSecret string
var err error
// For readability, refer to the pull secret in the cluster deployment config as the local pull secret.
if cd.Spec.PullSecret != nil {
localPullSecret, err = controllerutils.LoadSecretData(r.Client, cd.Spec.PullSecret.Name, cd.Namespace, corev1.DockerConfigJsonKey)
if err != nil {
if !apierrors.IsNotFound(err) {
return "", err
}
}
}
// Check for a global pull secret in the environment; it is set there from the hive config.
globalPullSecret := os.Getenv("GLOBAL_PULL_SECRET")
switch {
case globalPullSecret != "" && localPullSecret != "":
// Merge the local pull secret and the global pull secret. If both pull secrets contain the same registry name,
// the merged pull secret keeps the registry credentials from the local pull secret.
pullSecret, err := controllerutils.MergeJsons(globalPullSecret, localPullSecret, cdLog)
if err != nil {
errMsg := "unable to merge global pull secret with local pull secret"
cdLog.WithError(err).Error(errMsg)
return "", errors.Wrap(err, errMsg)
}
return pullSecret, nil
case globalPullSecret != "":
return globalPullSecret, nil
case localPullSecret != "":
return localPullSecret, nil
default:
errMsg := "clusterdeployment must specify pull secret since hiveconfig does not specify a global pull secret"
cdLog.Error(errMsg)
return "", errors.New(errMsg)
}
}
// updatePullSecretInfo creates or updates the merged pull secret object for the cluster deployment.
// It returns true when the merged pull secret object was created or updated.
func (r *ReconcileClusterDeployment) updatePullSecretInfo(pullSecret string, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
var err error
pullSecretObjExists := true
existingPullSecretObj := &corev1.Secret{}
mergedSecretName := constants.GetMergedPullSecretName(cd)
err = r.Get(context.TODO(), types.NamespacedName{Name: mergedSecretName, Namespace: cd.Namespace}, existingPullSecretObj)
if err != nil {
if apierrors.IsNotFound(err) {
cdLog.Info("Existing pull secret object not found")
pullSecretObjExists = false
} else {
return false, errors.Wrap(err, "Error getting pull secret from cluster deployment")
}
}
if pullSecretObjExists {
existingPullSecret, ok := existingPullSecretObj.Data[corev1.DockerConfigJsonKey]
if !ok {
return false, fmt.Errorf("Pull secret %s did not contain key %s", mergedSecretName, corev1.DockerConfigJsonKey)
}
if controllerutils.GetHashOfPullSecret(string(existingPullSecret)) == controllerutils.GetHashOfPullSecret(pullSecret) {
cdLog.Debug("Existing and the new merged pull secret are same")
return false, nil
}
cdLog.Info("Existing merged pull secret hash did not match with latest merged pull secret")
existingPullSecretObj.Data[corev1.DockerConfigJsonKey] = []byte(pullSecret)
err = r.Update(context.TODO(), existingPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error updating merged pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Updated the merged pull secret object successfully")
} else {
// create a new pull secret object
newPullSecretObj := generatePullSecretObj(
pullSecret,
mergedSecretName,
cd,
)
err = controllerutil.SetControllerReference(cd, newPullSecretObj, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on new merged pull secret: %v", err)
return false, err
}
err = r.Create(context.TODO(), newPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error creating new pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Created the merged pull secret object successfully")
}
return true, nil
}
| 1 | 7,518 | I'm nervous about this line, I don't want to go regenerate a bunch of imageset jobs for clusters that are old, already installed, but don't have a CLIImage set (which they wouldn't because they're old) Adding the Installed guard is meant to address this. Otherwise this *should* recreate the imageset job due to the code in this function that deletes the job if it's finished. (but we don't have our images set due to this clause) | openshift-hive | go |
@@ -303,7 +303,8 @@ public class HadoopTables implements Tables, Configurable {
}
Map<String, String> properties = propertiesBuilder.build();
- TableMetadata metadata = tableMetadata(schema, spec, sortOrder, properties, location);
+
+ TableMetadata metadata = TableMetadata.newTableMetadata(schema, spec, sortOrder, location, properties);
ops.commit(null, metadata);
return new BaseTable(ops, location);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hadoop;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Map;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.BaseTable;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SortOrder;
import org.apache.iceberg.StaticTableOperations;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.Tables;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.Transactions;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implementation of Iceberg tables that uses the Hadoop FileSystem
* to store metadata and manifests.
*/
public class HadoopTables implements Tables, Configurable {
private static final Logger LOG = LoggerFactory.getLogger(HadoopTables.class);
private static final String METADATA_JSON = "metadata.json";
private Configuration conf;
public HadoopTables() {
this(new Configuration());
}
public HadoopTables(Configuration conf) {
this.conf = conf;
}
/**
* Loads the table location from a FileSystem path location.
*
* @param location a path URI (e.g. hdfs:///warehouse/my_table/)
* @return table implementation
*/
@Override
public Table load(String location) {
Table result;
Pair<String, MetadataTableType> parsedMetadataType = parseMetadataType(location);
if (parsedMetadataType != null) {
// Load a metadata table
result = loadMetadataTable(parsedMetadataType.first(), location, parsedMetadataType.second());
} else {
// Load a normal table
TableOperations ops = newTableOps(location);
if (ops.current() != null) {
result = new BaseTable(ops, location);
} else {
throw new NoSuchTableException("Table does not exist at location: %s", location);
}
}
LOG.info("Table location loaded: {}", result.location());
return result;
}
/**
* Try to resolve a metadata table, which we encode as URI fragments
* e.g. hdfs:///warehouse/my_table#snapshots
* @param location Path to parse
* @return A base table name and MetadataTableType if a type is found, null if not
*/
private Pair<String, MetadataTableType> parseMetadataType(String location) {
int hashIndex = location.lastIndexOf('#');
if (hashIndex != -1 && !location.endsWith("#")) {
String baseTable = location.substring(0, hashIndex);
String metaTable = location.substring(hashIndex + 1);
MetadataTableType type = MetadataTableType.from(metaTable);
return (type == null) ? null : Pair.of(baseTable, type);
} else {
return null;
}
}
private Table loadMetadataTable(String location, String metadataTableName, MetadataTableType type) {
TableOperations ops = newTableOps(location);
if (ops.current() == null) {
throw new NoSuchTableException("Table does not exist at location: %s", location);
}
return MetadataTableUtils.createMetadataTableInstance(ops, location, metadataTableName, type);
}
/**
* Create a table using the FileSystem implementation resolved from the
* location.
*
* @param schema iceberg schema used to create the table
* @param spec partitioning spec, if null the table will be unpartitioned
* @param order a sort order, if null the table will be unsorted
* @param properties a string map of table properties, initialized to empty if null
* @param location a path URI (e.g. hdfs:///warehouse/my_table)
* @return newly created table implementation
*/
@Override
public Table create(Schema schema, PartitionSpec spec, SortOrder order,
Map<String, String> properties, String location) {
return buildTable(location, schema).withPartitionSpec(spec)
.withSortOrder(order)
.withProperties(properties)
.create();
}
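// Illustrative sketch of using create(); the schema variable and the HDFS path below are
// placeholders, not part of this class:
//
//   HadoopTables tables = new HadoopTables(new Configuration());
//   Table table = tables.create(schema, PartitionSpec.unpartitioned(), SortOrder.unsorted(),
//       ImmutableMap.of(), "hdfs:///warehouse/my_table");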
/**
* Drop a table and delete all data and metadata files.
*
* @param location a path URI (e.g. hdfs:///warehouse/my_table)
* @return true if the table was dropped, false if it did not exist
*/
public boolean dropTable(String location) {
return dropTable(location, true);
}
/**
* Drop a table; optionally delete data and metadata files.
* <p>
* If purge is set to true the implementation should delete all data and metadata files.
*
* @param location a path URI (e.g. hdfs:///warehouse/my_table)
* @param purge if true, delete all data and metadata files in the table
* @return true if the table was dropped, false if it did not exist
*/
public boolean dropTable(String location, boolean purge) {
TableOperations ops = newTableOps(location);
TableMetadata lastMetadata = null;
if (ops.current() != null) {
if (purge) {
lastMetadata = ops.current();
}
} else {
return false;
}
try {
if (purge && lastMetadata != null) {
// The data files and the metadata files may be stored in different locations,
// so dropTableData has to be called to force-delete the data files.
CatalogUtil.dropTableData(ops.io(), lastMetadata);
}
Path tablePath = new Path(location);
Util.getFs(tablePath, conf).delete(tablePath, true /* recursive */);
return true;
} catch (IOException e) {
throw new UncheckedIOException("Failed to delete file: " + location, e);
}
}
@VisibleForTesting
TableOperations newTableOps(String location) {
if (location.contains(METADATA_JSON)) {
return new StaticTableOperations(location, new HadoopFileIO(conf));
} else {
return new HadoopTableOperations(new Path(location), new HadoopFileIO(conf), conf);
}
}
private TableMetadata tableMetadata(Schema schema, PartitionSpec spec, SortOrder order,
Map<String, String> properties, String location) {
Preconditions.checkNotNull(schema, "A table schema is required");
Map<String, String> tableProps = properties == null ? ImmutableMap.of() : properties;
PartitionSpec partitionSpec = spec == null ? PartitionSpec.unpartitioned() : spec;
SortOrder sortOrder = order == null ? SortOrder.unsorted() : order;
return TableMetadata.newTableMetadata(schema, partitionSpec, sortOrder, location, tableProps);
}
/**
* Start a transaction to create a table.
*
* @param location a location for the table
* @param schema a schema
* @param spec a partition spec
* @param properties a string map of table properties
* @return a {@link Transaction} to create the table
* @throws AlreadyExistsException if the table already exists
*/
public Transaction newCreateTableTransaction(
String location,
Schema schema,
PartitionSpec spec,
Map<String, String> properties) {
return buildTable(location, schema).withPartitionSpec(spec).withProperties(properties).createTransaction();
}
/**
* Start a transaction to replace a table.
*
* @param location a location for the table
* @param schema a schema
* @param spec a partition spec
* @param properties a string map of table properties
* @param orCreate whether to create the table if not exists
* @return a {@link Transaction} to replace the table
* @throws NoSuchTableException if the table doesn't exist and orCreate is false
*/
public Transaction newReplaceTableTransaction(
String location,
Schema schema,
PartitionSpec spec,
Map<String, String> properties,
boolean orCreate) {
Catalog.TableBuilder builder = buildTable(location, schema).withPartitionSpec(spec).withProperties(properties);
return orCreate ? builder.createOrReplaceTransaction() : builder.replaceTransaction();
}
public Catalog.TableBuilder buildTable(String location, Schema schema) {
return new HadoopTableBuilder(location, schema);
}
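/**
 * A {@link Catalog.TableBuilder} bound to a fixed filesystem location; used by this class to
 * create or replace tables through the builder API.
 */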
private class HadoopTableBuilder implements Catalog.TableBuilder {
private final String location;
private final Schema schema;
private final ImmutableMap.Builder<String, String> propertiesBuilder = ImmutableMap.builder();
private PartitionSpec spec = PartitionSpec.unpartitioned();
private SortOrder sortOrder = SortOrder.unsorted();
HadoopTableBuilder(String location, Schema schema) {
this.location = location;
this.schema = schema;
}
@Override
public Catalog.TableBuilder withPartitionSpec(PartitionSpec newSpec) {
this.spec = newSpec != null ? newSpec : PartitionSpec.unpartitioned();
return this;
}
@Override
public Catalog.TableBuilder withSortOrder(SortOrder newSortOrder) {
this.sortOrder = newSortOrder != null ? newSortOrder : SortOrder.unsorted();
return this;
}
@Override
public Catalog.TableBuilder withLocation(String newLocation) {
Preconditions.checkArgument(newLocation == null || location.equals(newLocation),
String.format("Table location %s differs from the table location (%s) from the PathIdentifier",
newLocation, location));
return this;
}
@Override
public Catalog.TableBuilder withProperties(Map<String, String> properties) {
if (properties != null) {
propertiesBuilder.putAll(properties);
}
return this;
}
@Override
public Catalog.TableBuilder withProperty(String key, String value) {
propertiesBuilder.put(key, value);
return this;
}
@Override
public Table create() {
TableOperations ops = newTableOps(location);
if (ops.current() != null) {
throw new AlreadyExistsException("Table already exists at location: %s", location);
}
Map<String, String> properties = propertiesBuilder.build();
TableMetadata metadata = tableMetadata(schema, spec, sortOrder, properties, location);
ops.commit(null, metadata);
return new BaseTable(ops, location);
}
@Override
public Transaction createTransaction() {
TableOperations ops = newTableOps(location);
if (ops.current() != null) {
throw new AlreadyExistsException("Table already exists: %s", location);
}
Map<String, String> properties = propertiesBuilder.build();
TableMetadata metadata = tableMetadata(schema, spec, null, properties, location);
return Transactions.createTableTransaction(location, ops, metadata);
}
@Override
public Transaction replaceTransaction() {
return newReplaceTableTransaction(false);
}
@Override
public Transaction createOrReplaceTransaction() {
return newReplaceTableTransaction(true);
}
private Transaction newReplaceTableTransaction(boolean orCreate) {
TableOperations ops = newTableOps(location);
if (!orCreate && ops.current() == null) {
throw new NoSuchTableException("No such table: %s", location);
}
Map<String, String> properties = propertiesBuilder.build();
TableMetadata metadata;
if (ops.current() != null) {
metadata = ops.current().buildReplacement(schema, spec, sortOrder, location, properties);
} else {
metadata = tableMetadata(schema, spec, sortOrder, properties, location);
}
if (orCreate) {
return Transactions.createOrReplaceTableTransaction(location, ops, metadata);
} else {
return Transactions.replaceTableTransaction(location, ops, metadata);
}
}
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
}
| 1 | 31,360 | Is it necessary to change this file? Doesn't `tableMetadata` call `newTableMetadata`? | apache-iceberg | java |
@@ -78,12 +78,6 @@ func TestCatchupOverGossip(t *testing.T) {
t.Parallel()
// ledger node upgraded version, fetcher node upgraded version
runCatchupOverGossip(t, false, false)
- // ledger node older version, fetcher node upgraded version
- runCatchupOverGossip(t, true, false)
- // ledger node upgraded older version, fetcher node older version
- runCatchupOverGossip(t, false, true)
- // ledger node older version, fetcher node older version
- runCatchupOverGossip(t, true, true)
}
func runCatchupOverGossip(t *testing.T, | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package catchup
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
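// TestBasicCatchup starts a two-node network, lets it make progress for a few rounds, then spins up
// a cloned third node and verifies that it catches up.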
func TestBasicCatchup(t *testing.T) {
if testing.Short() {
t.Skip()
}
t.Parallel()
a := require.New(t)
// Overview of this test:
// Start a two-node network (primary has 0%, secondary has 100%)
// Let it run for a few blocks.
// Spin up a third node and see if it catches up
var fixture fixtures.RestClientFixture
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
// and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake
// distribution for catchup so this is fine.
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100Second.json"))
defer fixture.Shutdown()
// Get 2nd node so we wait until we know they're at target block
nc, err := fixture.GetNodeController("Node")
a.NoError(err)
// Let the network make some progress
a.NoError(err)
waitForRound := uint64(3)
err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
a.NoError(err)
// Now spin up third node
cloneDataDir := filepath.Join(fixture.PrimaryDataDir(), "../clone")
cloneLedger := false
err = fixture.NC.Clone(cloneDataDir, cloneLedger)
a.NoError(err)
cloneClient, err := fixture.StartNode(cloneDataDir)
a.NoError(err)
defer shutdownClonedNode(cloneDataDir, &fixture, t)
// Now, catch up
err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound)
a.NoError(err)
}
// TestCatchupOverGossip tests catchup across network versions.
// The current versions are the original v1 and the upgraded v2.1.
func TestCatchupOverGossip(t *testing.T) {
t.Parallel()
// ledger node upgraded version, fetcher node upgraded version
runCatchupOverGossip(t, false, false)
// ledger node older version, fetcher node upgraded version
runCatchupOverGossip(t, true, false)
// ledger node upgraded older version, fetcher node older version
runCatchupOverGossip(t, false, true)
// ledger node older version, fetcher node older version
runCatchupOverGossip(t, true, true)
}
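// runCatchupOverGossip runs the catchup-over-gossip scenario, optionally forcing the ledger
// (secondary) node and/or the fetcher (primary) node down to network protocol version "1".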
func runCatchupOverGossip(t *testing.T,
ledgerNodeDowngrade,
fetcherNodeDowngrade bool) {
if testing.Short() {
t.Skip()
}
a := require.New(t)
// Overview of this test:
// Start a two-node network (Primary with 0% stake, Secondary with 100% stake)
// Kill the primary for a few blocks. (Note that primary only has incoming connections)
// Now, revive the primary, and see if it catches up.
var fixture fixtures.RestClientFixture
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
// and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake
// distribution for catchup so this is fine.
fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100Second.json"))
if ledgerNodeDowngrade {
// Force the node to only support v1
dir, err := fixture.GetNodeDir("Node")
a.NoError(err)
cfg, err := config.LoadConfigFromDisk(dir)
a.NoError(err)
cfg.NetworkProtocolVersion = "1"
cfg.SaveToDisk(dir)
}
if fetcherNodeDowngrade {
// Force the node to only support v1
dir := fixture.PrimaryDataDir()
cfg, err := config.LoadConfigFromDisk(dir)
a.NoError(err)
cfg.NetworkProtocolVersion = "1"
cfg.SaveToDisk(dir)
}
defer fixture.Shutdown()
ncPrim, err := fixture.GetNodeController("Primary")
a.NoError(err)
// Get 2nd node, which makes all the progress
nc, err := fixture.GetNodeController("Node")
a.NoError(err)
// Start the secondary
_, err = fixture.StartNode(nc.GetDataDir())
a.NoError(err)
// Let the secondary make progress up to round 3, while the primary was never started (hence, it is still at round 0)
waitForRound := uint64(3)
err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
a.NoError(err)
// stop the secondary, which is on round 3 or more.
nc.FullStop()
// Now, start both primary and secondary, and let the primary catch up.
fixture.Start()
lg, err := fixture.StartNode(ncPrim.GetDataDir())
a.NoError(err)
// Now, catch up
err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(lg, waitForRound)
a.NoError(err)
waitStart := time.Now()
// wait until the round number on the secondary node matches the round number on the primary node.
for {
nodeLibGoalClient := fixture.LibGoalFixture.GetLibGoalClientFromDataDir(nc.GetDataDir())
nodeStatus, err := nodeLibGoalClient.Status()
a.NoError(err)
primaryStatus, err := lg.Status()
a.NoError(err)
if nodeStatus.LastRound <= primaryStatus.LastRound && waitForRound < nodeStatus.LastRound {
//t.Logf("Both nodes reached round %d\n", primaryStatus.LastRound)
break
}
if time.Now().Sub(waitStart) > time.Minute {
// it's taking too long.
require.FailNow(t, "Waiting too long for catchup to complete")
}
time.Sleep(50 * time.Millisecond)
}
}
// consensusTestUnupgradedProtocol is a version of ConsensusCurrentVersion
// that allows the control of the upgrade from consensusTestUnupgradedProtocol to
// consensusTestUnupgradedToProtocol
const consensusTestUnupgradedProtocol = protocol.ConsensusVersion("test-unupgraded-protocol")
// consensusTestUnupgradedToProtocol is a version of ConsensusCurrentVersion
// It is used as an upgrade from consensusTestUnupgradedProtocol
const consensusTestUnupgradedToProtocol = protocol.ConsensusVersion("test-unupgradedto-protocol")
func TestStoppedCatchupOnUnsupported(t *testing.T) {
if testing.Short() {
t.Skip()
}
t.Parallel()
a := require.New(t)
consensus := make(config.ConsensusProtocols)
// The following two protocols: testUnupgradedProtocol and testUnupgradedToProtocol
// are used to test the case when some nodes in the network do not make progress.
// testUnupgradedToProtocol is derived from ConsensusCurrentVersion and upgraded
// from testUnupgradedProtocol.
testUnupgradedToProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
testUnupgradedToProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
consensus[consensusTestUnupgradedToProtocol] = testUnupgradedToProtocol
// testUnupgradedProtocol is used to control the upgrade of a node. This is used
// to construct and run a network where some node is upgraded, and some other
// node is not upgraded.
// testUnupgradedProtocol is derived from ConsensusCurrentVersion and upgrades to
// testUnupgradedToProtocol.
testUnupgradedProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
testUnupgradedProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
testUnupgradedProtocol.UpgradeVoteRounds = 3
testUnupgradedProtocol.UpgradeThreshold = 2
testUnupgradedProtocol.DefaultUpgradeWaitRounds = 3
testUnupgradedProtocol.MinUpgradeWaitRounds = 0
testUnupgradedProtocol.ApprovedUpgrades[consensusTestUnupgradedToProtocol] = 0
consensus[consensusTestUnupgradedProtocol] = testUnupgradedProtocol
// Overview of this test:
// Start a two-node network (primary has 0%, secondary has 100%)
// Let it run for a few blocks.
// Spin up a third node and see if it catches up
var fixture fixtures.RestClientFixture
fixture.SetConsensus(consensus)
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
// and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake
// distribution for catchup so this is fine.
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100SecondTestUnupgradedProtocol.json"))
defer fixture.Shutdown()
// Get 2nd node so we wait until we know they're at target block
nc, err := fixture.GetNodeController("Node")
a.NoError(err)
// Let the network make some progress
a.NoError(err)
waitForRound := uint64(3) // UpgradeVoteRounds + DefaultUpgradeWaitRounds
err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
a.NoError(err)
// Now spin up third node
cloneDataDir := filepath.Join(fixture.PrimaryDataDir(), "../clone")
cloneLedger := false
err = fixture.NC.Clone(cloneDataDir, cloneLedger)
a.NoError(err)
delete(consensus, consensusTestUnupgradedToProtocol)
fixture.GetNodeControllerForDataDir(cloneDataDir).SetConsensus(consensus)
cloneClient, err := fixture.StartNode(cloneDataDir)
a.NoError(err)
defer shutdownClonedNode(cloneDataDir, &fixture, t)
// Now, catch up
err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound)
a.NoError(err)
timeout := time.NewTimer(20 * time.Second)
loop := true
for loop { // loop until timeout, error from Status() or the node stops making progress
status, err := cloneClient.Status()
select {
case <-timeout.C: // timeout
loop = false
default:
if err != nil { // error from Status()
loop = false
break
}
// Continue looping as long as:
// (1) next version is the same as current version, or
// (2) next version is a different protocol (test knows it is not supported), but
// last round in current protocol is not yet added to the ledger (status.LastRound)
// And check that status.StoppedAtUnsupportedRound is false
if status.NextVersion == status.LastVersion || // next is not a new protocol, or
// next is a new protocol but,
(status.NextVersion != status.LastVersion &&
// the new protocol version is not the next round
status.LastRound+1 != status.NextVersionRound) {
// libgoal Client StoppedAtUnsupportedRound in v1.NodeStatus should be false
a.False(status.StoppedAtUnsupportedRound)
// Give some time for the next round
time.Sleep(800 * time.Millisecond)
} else {
loop = false
}
}
}
a.NoError(err)
status, err := cloneClient.Status()
// Stopped at the first protocol
a.Equal("test-unupgraded-protocol", status.LastVersion)
// Next version is different (did not upgrade to it)
a.NotEqual(status.NextVersion, status.LastVersion)
// Next round is when the upgrade happens
a.True(!status.NextVersionSupported && status.LastRound+1 == status.NextVersionRound)
// libgoal Client StoppedAtUnsupportedRound in v1.NodeStatus should now be true
a.True(status.StoppedAtUnsupportedRound)
}
// shutdownClonedNode replicates the behavior of fixture.Shutdown() on the cloned node
// It deletes the directory if the test passes, otherwise it preserves it
func shutdownClonedNode(nodeDataDir string, f *fixtures.RestClientFixture, t *testing.T) {
nc := f.LibGoalFixture.GetNodeControllerForDataDir(nodeDataDir)
nc.FullStop()
if !t.Failed() {
os.RemoveAll(nodeDataDir)
}
}
| 1 | 41,734 | I think that I have a better proposal for this test - improve it so that it would know how to read the list of SupportedProtocolVersions and dynamically use these. The motivation here is that I expect to have another network version soon, and this test seems to be a good test case for that. | algorand-go-algorand | go |
@@ -89,10 +89,13 @@ public class HiveCatalog extends BaseMetastoreCatalog implements Closeable, Supp
try {
List<String> tables = clients.run(client -> client.getAllTables(database));
- return tables.stream()
+ List<TableIdentifier> tableIdentifiers = tables.stream()
.map(t -> TableIdentifier.of(namespace, t))
.collect(Collectors.toList());
+ LOG.debug("Listing of namespace [{}] resulted in the following tables: [{}]", namespace, tableIdentifiers);
+ return tableIdentifiers;
+
} catch (UnknownDBException e) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.io.Closeable;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.iceberg.BaseMetastoreCatalog;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
import org.apache.iceberg.exceptions.NoSuchNamespaceException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
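/**
 * An Iceberg catalog backed by the Hive MetaStore. Single-level namespaces map to Hive databases,
 * and Iceberg tables are tracked through Hive MetaStore table entries.
 */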
public class HiveCatalog extends BaseMetastoreCatalog implements Closeable, SupportsNamespaces {
private static final Logger LOG = LoggerFactory.getLogger(HiveCatalog.class);
private final String name;
private final HiveClientPool clients;
private final Configuration conf;
private final StackTraceElement[] createStack;
private boolean closed;
public HiveCatalog(Configuration conf) {
this.name = "hive";
this.clients = new HiveClientPool(conf);
this.conf = conf;
this.createStack = Thread.currentThread().getStackTrace();
this.closed = false;
}
public HiveCatalog(String name, String uri, int clientPoolSize, Configuration conf) {
this.name = name;
this.conf = new Configuration(conf);
// before building the client pool, overwrite the configuration's URIs if the argument is non-null
if (uri != null) {
this.conf.set("hive.metastore.uris", uri);
}
this.clients = new HiveClientPool(clientPoolSize, this.conf);
this.createStack = Thread.currentThread().getStackTrace();
this.closed = false;
}
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
Preconditions.checkArgument(isValidateNamespace(namespace),
"Missing database in namespace: %s", namespace);
String database = namespace.level(0);
try {
List<String> tables = clients.run(client -> client.getAllTables(database));
return tables.stream()
.map(t -> TableIdentifier.of(namespace, t))
.collect(Collectors.toList());
} catch (UnknownDBException e) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
} catch (TException e) {
throw new RuntimeException("Failed to list all tables under namespace " + namespace, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to listTables", e);
}
}
@Override
protected String name() {
return name;
}
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
if (!isValidIdentifier(identifier)) {
return false;
}
String database = identifier.namespace().level(0);
TableOperations ops = newTableOps(identifier);
TableMetadata lastMetadata;
if (purge && ops.current() != null) {
lastMetadata = ops.current();
} else {
lastMetadata = null;
}
try {
clients.run(client -> {
client.dropTable(database, identifier.name(),
false /* do not delete data */,
false /* throw NoSuchObjectException if the table doesn't exist */);
return null;
});
if (purge && lastMetadata != null) {
dropTableData(ops.io(), lastMetadata);
}
return true;
} catch (NoSuchTableException | NoSuchObjectException e) {
return false;
} catch (TException e) {
throw new RuntimeException("Failed to drop " + identifier, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to dropTable", e);
}
}
@Override
public void renameTable(TableIdentifier from, TableIdentifier originalTo) {
if (!isValidIdentifier(from)) {
throw new NoSuchTableException("Invalid identifier: %s", from);
}
TableIdentifier to = removeCatalogName(originalTo);
Preconditions.checkArgument(isValidIdentifier(to), "Invalid identifier: %s", to);
String toDatabase = to.namespace().level(0);
String fromDatabase = from.namespace().level(0);
String fromName = from.name();
try {
Table table = clients.run(client -> client.getTable(fromDatabase, fromName));
HiveTableOperations.validateTableIsIceberg(table, fullTableName(name, from));
table.setDbName(toDatabase);
table.setTableName(to.name());
clients.run(client -> {
client.alter_table(fromDatabase, fromName, table);
return null;
});
} catch (NoSuchObjectException e) {
throw new NoSuchTableException("Table does not exist: %s", from);
} catch (AlreadyExistsException e) {
throw new org.apache.iceberg.exceptions.AlreadyExistsException("Table already exists: %s", to);
} catch (TException e) {
throw new RuntimeException("Failed to rename " + from + " to " + to, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to rename", e);
}
}
@Override
public void createNamespace(Namespace namespace, Map<String, String> meta) {
Preconditions.checkArgument(
!namespace.isEmpty(),
"Cannot create namespace with invalid name: %s", namespace);
Preconditions.checkArgument(isValidateNamespace(namespace),
"Cannot support multi part namespace in Hive MetaStore: %s", namespace);
try {
clients.run(client -> {
client.createDatabase(convertToDatabase(namespace, meta));
return null;
});
} catch (AlreadyExistsException e) {
throw new org.apache.iceberg.exceptions.AlreadyExistsException(e, "Namespace '%s' already exists!",
namespace);
} catch (TException e) {
throw new RuntimeException("Failed to create namespace " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to createDatabase(name) " + namespace + " in Hive MataStore", e);
}
}
@Override
public List<Namespace> listNamespaces(Namespace namespace) {
if (!isValidateNamespace(namespace) && !namespace.isEmpty()) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
if (!namespace.isEmpty()) {
return ImmutableList.of();
}
try {
return clients.run(HiveMetaStoreClient::getAllDatabases)
.stream()
.map(Namespace::of)
.collect(Collectors.toList());
} catch (TException e) {
throw new RuntimeException("Failed to list all namespace: " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to getAllDatabases() " + namespace + " in Hive MataStore", e);
}
}
@Override
public boolean dropNamespace(Namespace namespace) {
if (!isValidateNamespace(namespace)) {
return false;
}
try {
clients.run(client -> {
client.dropDatabase(namespace.level(0),
false /* deleteData */,
false /* ignoreUnknownDb */,
false /* cascade */);
return null;
});
return true;
} catch (InvalidOperationException e) {
throw new NamespaceNotEmptyException("Namespace " + namespace + " is not empty. One or more tables exist.", e);
} catch (NoSuchObjectException e) {
return false;
} catch (TException e) {
throw new RuntimeException("Failed to drop namespace " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to drop dropDatabase(name) " + namespace + " in Hive MataStore", e);
}
}
@Override
public boolean setProperties(Namespace namespace, Map<String, String> properties) {
Map<String, String> parameter = Maps.newHashMap();
parameter.putAll(loadNamespaceMetadata(namespace));
parameter.putAll(properties);
Database database = convertToDatabase(namespace, parameter);
return alterHiveDataBase(namespace, database);
}
@Override
public boolean removeProperties(Namespace namespace, Set<String> properties) {
Map<String, String> parameter = Maps.newHashMap();
parameter.putAll(loadNamespaceMetadata(namespace));
properties.forEach(key -> parameter.put(key, null));
Database database = convertToDatabase(namespace, parameter);
return alterHiveDataBase(namespace, database);
}
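// Applies the given Database definition to the namespace's database in the Hive MetaStore,
// returning true on success; shared by setProperties and removeProperties above.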
private boolean alterHiveDataBase(Namespace namespace, Database database) {
try {
clients.run(client -> {
client.alterDatabase(namespace.level(0), database);
return null;
});
return true;
} catch (NoSuchObjectException | UnknownDBException e) {
throw new NoSuchNamespaceException(e, "Namespace does not exist: %s", namespace);
} catch (TException e) {
throw new RuntimeException(
"Failed to alter namespace " + namespace + " in Hive MetaStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to alterDatabase(name) " + namespace + " in Hive MetaStore", e);
}
}
@Override
public Map<String, String> loadNamespaceMetadata(Namespace namespace) {
if (!isValidateNamespace(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
try {
Database database = clients.run(client -> client.getDatabase(namespace.level(0)));
return convertToMetadata(database);
} catch (NoSuchObjectException | UnknownDBException e) {
throw new NoSuchNamespaceException(e, "Namespace does not exist: %s", namespace);
} catch (TException e) {
throw new RuntimeException("Failed to list namespace under namespace: " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to getDatabase(name) " + namespace + " in Hive MataStore", e);
}
}
@Override
protected boolean isValidIdentifier(TableIdentifier tableIdentifier) {
return tableIdentifier.namespace().levels().length == 1;
}
private TableIdentifier removeCatalogName(TableIdentifier to) {
if (isValidIdentifier(to)) {
return to;
}
// check if the identifier includes the catalog name and remove it
if (to.namespace().levels().length == 2 && name().equalsIgnoreCase(to.namespace().level(0))) {
return TableIdentifier.of(Namespace.of(to.namespace().level(1)), to.name());
}
// return the original unmodified
return to;
}
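// A namespace is valid for this catalog only when it has exactly one level (a single Hive database name).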
private boolean isValidateNamespace(Namespace namespace) {
return namespace.levels().length == 1;
}
@Override
public TableOperations newTableOps(TableIdentifier tableIdentifier) {
String dbName = tableIdentifier.namespace().level(0);
String tableName = tableIdentifier.name();
return new HiveTableOperations(conf, clients, name, dbName, tableName);
}
@Override
protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) {
// This is a little edgy since we basically duplicate the HMS location generation logic.
// Sadly I do not see a good way around this if we want to keep the order of events, like:
// - Create meta files
// - Create the metadata in HMS, thereby committing the changes
// Use the database location as the base for the new table location if one is set at the database level
try {
Database databaseData = clients.run(client -> client.getDatabase(tableIdentifier.namespace().levels()[0]));
if (databaseData.getLocationUri() != null) {
// If the database location is set use it as a base.
return String.format("%s/%s", databaseData.getLocationUri(), tableIdentifier.name());
}
} catch (TException e) {
throw new RuntimeException(String.format("Metastore operation failed for %s", tableIdentifier), e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during commit", e);
}
// Otherwise stick to the {WAREHOUSE_DIR}/{DB_NAME}.db/{TABLE_NAME} path
String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
Preconditions.checkNotNull(
warehouseLocation,
"Warehouse location is not set: hive.metastore.warehouse.dir=null");
return String.format(
"%s/%s.db/%s",
warehouseLocation,
tableIdentifier.namespace().levels()[0],
tableIdentifier.name());
}
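// Flattens a Hive Database into a metadata map: its parameters plus a "location" entry and, when
// the database has a description, a "comment" entry.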
private Map<String, String> convertToMetadata(Database database) {
Map<String, String> meta = Maps.newHashMap();
meta.putAll(database.getParameters());
meta.put("location", database.getLocationUri());
if (database.getDescription() != null) {
meta.put("comment", database.getDescription());
}
return meta;
}
Database convertToDatabase(Namespace namespace, Map<String, String> meta) {
String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
if (!isValidateNamespace(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
Database database = new Database();
Map<String, String> parameter = Maps.newHashMap();
database.setName(namespace.level(0));
database.setLocationUri(new Path(warehouseLocation, namespace.level(0)).toString() + ".db");
meta.forEach((key, value) -> {
if (key.equals("comment")) {
database.setDescription(value);
} else if (key.equals("location")) {
database.setLocationUri(value);
} else {
if (value != null) {
parameter.put(key, value);
}
}
});
database.setParameters(parameter);
return database;
}
@Override
public void close() {
if (!closed) {
clients.close();
closed = true;
}
}
@SuppressWarnings("checkstyle:NoFinalizer")
@Override
protected void finalize() throws Throwable {
super.finalize();
if (!closed) {
close(); // releasing resources is more important than printing the warning
String trace = Joiner.on("\n\t").join(
Arrays.copyOfRange(createStack, 1, createStack.length));
LOG.warn("Unclosed input stream created by:\n\t{}", trace);
}
}
}
| 1 | 23,816 | What is the purpose of this debug message? I'm not sure how it would help. | apache-iceberg | java |
@@ -25,7 +25,6 @@ import (
"time"
"kythe.io/kythe/go/extractors/bazel"
- "kythe.io/kythe/go/extractors/bazel/extutil"
)
var ( | 1 | /*
* Copyright 2017 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Program extract_kzip implements a Bazel extra action that captures a Kythe
// compilation record for a "spawn" action.
package main
import (
"context"
"flag"
"log"
"time"
"kythe.io/kythe/go/extractors/bazel"
"kythe.io/kythe/go/extractors/bazel/extutil"
)
var (
outputPath = flag.String("output", "", "Path of output index file (required)")
settings bazel.Settings
)
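// A typical invocation might look like the following (illustrative only; apart from --output,
// the available flags are whatever bazel.Settings registers through SetFlags in init below):
//
//    extract_kzip --output=/path/to/compilation.kzip <settings flags...>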
func init() {
flag.Usage = settings.SetFlags(nil, "")
}
func main() {
flag.Parse()
// Verify that required flags are set.
if *outputPath == "" {
log.Fatal("You must provide a non-empty --output file path")
}
config, info, err := bazel.NewFromSettings(settings)
if err != nil {
log.Fatalf("Invalid config settings: %v", err)
}
ctx := context.Background()
start := time.Now()
ai, err := bazel.SpawnAction(info)
if err != nil {
log.Fatalf("Invalid extra action: %v", err)
}
if err := extutil.ExtractAndWrite(ctx, config, ai, *outputPath); err != nil {
log.Fatalf("Extraction failed: %v", err)
}
log.Printf("Finished extracting [%v elapsed]", time.Since(start))
}
| 1 | 12,215 | Is there a corresponding BUILD dependency to prune? | kythe-kythe | go |
@@ -2066,7 +2066,14 @@ func testSignedURL(t *testing.T, newHarness HarnessMaker) {
}
getURLNoParamsURL.RawQuery = ""
getURLNoParams := getURLNoParamsURL.String()
- putURL, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{Method: http.MethodPut})
+ const (
+ goodContentType = "text/plain"
+ badContentType = "application/octet-stream"
+ )
+ putURL, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{
+ Method: http.MethodPut,
+ ContentType: goodContentType,
+ })
if err != nil {
t.Fatal(err)
} else if putURL == "" { | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package drivertest provides a conformance test for implementations of
// driver.
package drivertest // import "gocloud.dev/blob/drivertest"
import (
"bytes"
"context"
"crypto/md5"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/escape"
)
// Harness describes the functionality test harnesses must provide to run
// conformance tests.
type Harness interface {
// MakeDriver creates a driver.Bucket to test.
// Multiple calls to MakeDriver during a test run must refer to the
// same storage bucket; i.e., a blob created using one driver.Bucket must
// be readable by a subsequent driver.Bucket.
MakeDriver(ctx context.Context) (driver.Bucket, error)
// HTTPClient should return an unauthorized *http.Client, or nil.
// Required if the service supports SignedURL.
HTTPClient() *http.Client
// Close closes resources used by the harness.
Close()
}
// HarnessMaker describes functions that construct a harness for running tests.
// It is called exactly once per test; Harness.Close() will be called when the test is complete.
type HarnessMaker func(ctx context.Context, t *testing.T) (Harness, error)
// AsTest represents a test of As functionality.
// The conformance test:
// 1. Calls BucketCheck.
// 2. Creates a blob in a directory, using BeforeWrite as a WriterOption.
// 3. Fetches the blob's attributes and calls AttributeCheck.
// 4. Creates a Reader for the blob using BeforeReader as a ReaderOption,
// and calls ReaderCheck with the resulting Reader.
// 5. Calls List using BeforeList as a ListOption, with Delimiter set so
// that only the directory is returned, and calls ListObjectCheck
// on the single directory list entry returned.
// 6. Calls List using BeforeList as a ListOption, and calls ListObjectCheck
// on the single blob entry returned.
// 7. Tries to read a non-existent blob, and calls ErrorCheck with the error.
// 8. Makes a copy of the blob, using BeforeCopy as a CopyOption.
//
// For example, an AsTest might set a driver-specific field to a custom
// value in BeforeWrite, and then verify the custom value was returned in
// AttributesCheck and/or ReaderCheck.
type AsTest interface {
// Name should return a descriptive name for the test.
Name() string
// BucketCheck will be called to allow verification of Bucket.As.
BucketCheck(b *blob.Bucket) error
// ErrorCheck will be called to allow verification of Bucket.ErrorAs.
ErrorCheck(b *blob.Bucket, err error) error
// BeforeRead will be passed directly to ReaderOptions as part of reading
// a test blob.
BeforeRead(as func(interface{}) bool) error
// BeforeWrite will be passed directly to WriterOptions as part of creating
// a test blob.
BeforeWrite(as func(interface{}) bool) error
// BeforeCopy will be passed directly to CopyOptions as part of copying
// the test blob.
BeforeCopy(as func(interface{}) bool) error
// BeforeList will be passed directly to ListOptions as part of listing the
// test blob.
BeforeList(as func(interface{}) bool) error
// AttributesCheck will be called after fetching the test blob's attributes.
// It should call attrs.As and verify the results.
AttributesCheck(attrs *blob.Attributes) error
// ReaderCheck will be called after creating a blob.Reader.
// It should call r.As and verify the results.
ReaderCheck(r *blob.Reader) error
// ListObjectCheck will be called after calling List with the test object's
// name as the Prefix. It should call o.As and verify the results.
ListObjectCheck(o *blob.ListObject) error
}
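// verifyAsFailsOnNil is an AsTest that is always appended to the caller-provided tests; it verifies
// that each As entry point returns false (and that ErrorAs panics) when passed nil.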
type verifyAsFailsOnNil struct{}
func (verifyAsFailsOnNil) Name() string {
return "verify As returns false when passed nil"
}
func (verifyAsFailsOnNil) BucketCheck(b *blob.Bucket) error {
if b.As(nil) {
return errors.New("want Bucket.As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) ErrorCheck(b *blob.Bucket, err error) (ret error) {
defer func() {
if recover() == nil {
ret = errors.New("want ErrorAs to panic when passed nil")
}
}()
b.ErrorAs(err, nil)
return nil
}
func (verifyAsFailsOnNil) BeforeRead(as func(interface{}) bool) error {
if as(nil) {
return errors.New("want BeforeReader's As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) BeforeWrite(as func(interface{}) bool) error {
if as(nil) {
return errors.New("want BeforeWrite's As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) BeforeCopy(as func(interface{}) bool) error {
if as(nil) {
return errors.New("want BeforeCopy's As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) BeforeList(as func(interface{}) bool) error {
if as(nil) {
return errors.New("want BeforeList's As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) AttributesCheck(attrs *blob.Attributes) error {
if attrs.As(nil) {
return errors.New("want Attributes.As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) ReaderCheck(r *blob.Reader) error {
if r.As(nil) {
return errors.New("want Reader.As to return false when passed nil")
}
return nil
}
func (verifyAsFailsOnNil) ListObjectCheck(o *blob.ListObject) error {
if o.As(nil) {
return errors.New("want ListObject.As to return false when passed nil")
}
return nil
}
// RunConformanceTests runs conformance tests for driver implementations of blob.
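//
// Illustrative wiring from a driver's test file (newHarness here is hypothetical and
// driver-specific):
//
//    func TestConformance(t *testing.T) {
//        drivertest.RunConformanceTests(t, newHarness, nil)
//    }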
func RunConformanceTests(t *testing.T, newHarness HarnessMaker, asTests []AsTest) {
t.Run("TestList", func(t *testing.T) {
testList(t, newHarness)
})
t.Run("TestListWeirdKeys", func(t *testing.T) {
testListWeirdKeys(t, newHarness)
})
t.Run("TestListDelimiters", func(t *testing.T) {
testListDelimiters(t, newHarness)
})
t.Run("TestRead", func(t *testing.T) {
testRead(t, newHarness)
})
t.Run("TestAttributes", func(t *testing.T) {
testAttributes(t, newHarness)
})
t.Run("TestWrite", func(t *testing.T) {
testWrite(t, newHarness)
})
t.Run("TestCanceledWrite", func(t *testing.T) {
testCanceledWrite(t, newHarness)
})
t.Run("TestConcurrentWriteAndRead", func(t *testing.T) {
testConcurrentWriteAndRead(t, newHarness)
})
t.Run("TestMetadata", func(t *testing.T) {
testMetadata(t, newHarness)
})
t.Run("TestMD5", func(t *testing.T) {
testMD5(t, newHarness)
})
t.Run("TestCopy", func(t *testing.T) {
testCopy(t, newHarness)
})
t.Run("TestDelete", func(t *testing.T) {
testDelete(t, newHarness)
})
t.Run("TestKeys", func(t *testing.T) {
testKeys(t, newHarness)
})
t.Run("TestSignedURL", func(t *testing.T) {
testSignedURL(t, newHarness)
})
asTests = append(asTests, verifyAsFailsOnNil{})
t.Run("TestAs", func(t *testing.T) {
for _, st := range asTests {
if st.Name() == "" {
t.Fatalf("AsTest.Name is required")
}
t.Run(st.Name(), func(t *testing.T) {
testAs(t, newHarness, st)
})
}
})
}
// RunBenchmarks runs benchmarks for driver implementations of blob.
func RunBenchmarks(b *testing.B, bkt *blob.Bucket) {
b.Run("BenchmarkRead", func(b *testing.B) {
benchmarkRead(b, bkt)
})
b.Run("BenchmarkWriteReadDelete", func(b *testing.B) {
benchmarkWriteReadDelete(b, bkt)
})
}
// testList tests the functionality of List.
func testList(t *testing.T, newHarness HarnessMaker) {
const keyPrefix = "blob-for-list"
content := []byte("hello")
keyForIndex := func(i int) string { return fmt.Sprintf("%s-%d", keyPrefix, i) }
gotIndices := func(t *testing.T, objs []*driver.ListObject) []int {
var got []int
for _, obj := range objs {
if !strings.HasPrefix(obj.Key, keyPrefix) {
t.Errorf("got name %q, expected it to have prefix %q", obj.Key, keyPrefix)
continue
}
i, err := strconv.Atoi(obj.Key[len(keyPrefix)+1:])
if err != nil {
t.Error(err)
continue
}
got = append(got, i)
}
return got
}
tests := []struct {
name string
pageSize int
prefix string
wantPages [][]int
want []int
}{
{
name: "no objects",
prefix: "no-objects-with-this-prefix",
wantPages: [][]int{nil},
},
{
name: "exactly 1 object due to prefix",
prefix: keyForIndex(1),
wantPages: [][]int{{1}},
want: []int{1},
},
{
name: "no pagination",
prefix: keyPrefix,
wantPages: [][]int{{0, 1, 2}},
want: []int{0, 1, 2},
},
{
name: "by 1",
prefix: keyPrefix,
pageSize: 1,
wantPages: [][]int{{0}, {1}, {2}},
want: []int{0, 1, 2},
},
{
name: "by 2",
prefix: keyPrefix,
pageSize: 2,
wantPages: [][]int{{0, 1}, {2}},
want: []int{0, 1, 2},
},
{
name: "by 3",
prefix: keyPrefix,
pageSize: 3,
wantPages: [][]int{{0, 1, 2}},
want: []int{0, 1, 2},
},
}
ctx := context.Background()
// Creates blobs for sub-tests below.
// We only create the blobs once, for efficiency and because there's
// no guarantee that after we create them they will be immediately returned
// from List. The very first time the test is run against a Bucket, it may be
// flaky due to this race.
init := func(t *testing.T) (driver.Bucket, func()) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
// See if the blobs are already there.
b := blob.NewBucket(drv)
iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
found := iterToSetOfKeys(ctx, t, iter)
for i := 0; i < 3; i++ {
key := keyForIndex(i)
if !found[key] {
if err := b.WriteAll(ctx, key, content, nil); err != nil {
b.Close()
t.Fatal(err)
}
}
}
return drv, func() { b.Close(); h.Close() }
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
drv, done := init(t)
defer done()
var gotPages [][]int
var got []int
var nextPageToken []byte
for {
page, err := drv.ListPaged(ctx, &driver.ListOptions{
PageSize: tc.pageSize,
Prefix: tc.prefix,
PageToken: nextPageToken,
})
if err != nil {
t.Fatal(err)
}
gotThisPage := gotIndices(t, page.Objects)
got = append(got, gotThisPage...)
gotPages = append(gotPages, gotThisPage)
if len(page.NextPageToken) == 0 {
break
}
nextPageToken = page.NextPageToken
}
if diff := cmp.Diff(gotPages, tc.wantPages); diff != "" {
t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", gotPages, tc.wantPages, diff)
}
if diff := cmp.Diff(got, tc.want); diff != "" {
t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, tc.want, diff)
}
})
}
// Verify pagination works when inserting in a retrieved page.
t.Run("PaginationConsistencyAfterInsert", func(t *testing.T) {
drv, done := init(t)
defer done()
// Fetch a page of 2 results: 0, 1.
page, err := drv.ListPaged(ctx, &driver.ListOptions{
PageSize: 2,
Prefix: keyPrefix,
})
if err != nil {
t.Fatal(err)
}
got := gotIndices(t, page.Objects)
want := []int{0, 1}
if diff := cmp.Diff(got, want); diff != "" {
t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
}
// Insert a key "0a" in the middle of the page we already retrieved.
b := blob.NewBucket(drv)
defer b.Close()
key := page.Objects[0].Key + "a"
if err := b.WriteAll(ctx, key, content, nil); err != nil {
t.Fatal(err)
}
defer func() {
_ = b.Delete(ctx, key)
}()
// Fetch the next page. It should not include 0, 0a, or 1, and it should
// include 2.
page, err = drv.ListPaged(ctx, &driver.ListOptions{
Prefix: keyPrefix,
PageToken: page.NextPageToken,
})
if err != nil {
t.Fatal(err)
}
got = gotIndices(t, page.Objects)
want = []int{2}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
}
})
// Verify pagination works when deleting in a retrieved page.
t.Run("PaginationConsistencyAfterDelete", func(t *testing.T) {
drv, done := init(t)
defer done()
// Fetch a page of 2 results: 0, 1.
page, err := drv.ListPaged(ctx, &driver.ListOptions{
PageSize: 2,
Prefix: keyPrefix,
})
if err != nil {
t.Fatal(err)
}
got := gotIndices(t, page.Objects)
want := []int{0, 1}
if diff := cmp.Diff(got, want); diff != "" {
t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
}
// Delete key "1".
b := blob.NewBucket(drv)
defer b.Close()
key := page.Objects[1].Key
if err := b.Delete(ctx, key); err != nil {
t.Fatal(err)
}
defer func() {
_ = b.WriteAll(ctx, key, content, nil)
}()
// Fetch the next page. It should not include 0 or 1, and it should
// include 2.
page, err = drv.ListPaged(ctx, &driver.ListOptions{
Prefix: keyPrefix,
PageToken: page.NextPageToken,
})
if err != nil {
t.Fatal(err)
}
got = gotIndices(t, page.Objects)
want = []int{2}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
}
})
}
// testListWeirdKeys tests the functionality of List on weird keys.
func testListWeirdKeys(t *testing.T, newHarness HarnessMaker) {
const keyPrefix = "list-weirdkeys-"
content := []byte("hello")
ctx := context.Background()
// We're going to create a blob for each of the weird key strings, and
// then verify we can see them with List.
want := map[string]bool{}
for _, k := range escape.WeirdStrings {
want[keyPrefix+k] = true
}
// Creates blobs for sub-tests below.
// We only create the blobs once, for efficiency and because there's
// no guarantee that after we create them they will be immediately returned
// from List. The very first time the test is run against a Bucket, it may be
// flaky due to this race.
init := func(t *testing.T) (*blob.Bucket, func()) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
// See if the blobs are already there.
b := blob.NewBucket(drv)
iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
found := iterToSetOfKeys(ctx, t, iter)
for _, k := range escape.WeirdStrings {
key := keyPrefix + k
if !found[key] {
if err := b.WriteAll(ctx, key, content, nil); err != nil {
b.Close()
t.Fatal(err)
}
}
}
return b, func() { b.Close(); h.Close() }
}
b, done := init(t)
defer done()
iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
got := iterToSetOfKeys(ctx, t, iter)
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
}
}
// listResult is a recursive view of the hierarchy. It's used to verify List
// using Delimiter.
type listResult struct {
Key string
IsDir bool
// If IsDir is true and recursion is enabled, the recursive listing of the directory.
Sub []listResult
}
// doList lists b using prefix and delim.
// If recurse is true, it recurses into directories filling in listResult.Sub.
func doList(ctx context.Context, b *blob.Bucket, prefix, delim string, recurse bool) ([]listResult, error) {
iter := b.List(&blob.ListOptions{
Prefix: prefix,
Delimiter: delim,
})
var retval []listResult
for {
obj, err := iter.Next(ctx)
if err == io.EOF {
if obj != nil {
return nil, errors.New("obj is not nil on EOF")
}
break
}
if err != nil {
return nil, err
}
var sub []listResult
if obj.IsDir && recurse {
sub, err = doList(ctx, b, obj.Key, delim, true)
if err != nil {
return nil, err
}
}
retval = append(retval, listResult{
Key: obj.Key,
IsDir: obj.IsDir,
Sub: sub,
})
}
return retval, nil
}
// testListDelimiters tests the functionality of List using Delimiters.
func testListDelimiters(t *testing.T, newHarness HarnessMaker) {
const keyPrefix = "blob-for-delimiters-"
content := []byte("hello")
// The set of files to use for these tests. The strings in each entry will
// be joined using delim, so the result is a directory structure like this
// (using / as delimiter):
// dir1/a.txt
// dir1/b.txt
// dir1/subdir/c.txt
// dir1/subdir/d.txt
// dir2/e.txt
// f.txt
keys := [][]string{
{"dir1", "a.txt"},
{"dir1", "b.txt"},
{"dir1", "subdir", "c.txt"},
{"dir1", "subdir", "d.txt"},
{"dir2", "e.txt"},
{"f.txt"},
}
// Test with several different delimiters.
tests := []struct {
name, delim string
// Expected result of doList with an empty delimiter.
// All keys should be listed at the top level, with no directories.
wantFlat []listResult
// Expected result of doList with delimiter and recurse = true.
// All keys should be listed, with keys in directories in the Sub field
// of their directory.
wantRecursive []listResult
// Expected result of repeatedly calling driver.ListPaged with delimiter
// and page size = 1.
wantPaged []listResult
// expected result of doList with delimiter and recurse = false
// after dir2/e.txt is deleted
// dir1/ and f.txt should be listed; dir2/ should no longer be present
// because there are no keys in it.
wantAfterDel []listResult
}{
{
name: "fwdslash",
delim: "/",
wantFlat: []listResult{
{Key: keyPrefix + "/dir1/a.txt"},
{Key: keyPrefix + "/dir1/b.txt"},
{Key: keyPrefix + "/dir1/subdir/c.txt"},
{Key: keyPrefix + "/dir1/subdir/d.txt"},
{Key: keyPrefix + "/dir2/e.txt"},
{Key: keyPrefix + "/f.txt"},
},
wantRecursive: []listResult{
{
Key: keyPrefix + "/dir1/",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "/dir1/a.txt"},
{Key: keyPrefix + "/dir1/b.txt"},
{
Key: keyPrefix + "/dir1/subdir/",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "/dir1/subdir/c.txt"},
{Key: keyPrefix + "/dir1/subdir/d.txt"},
},
},
},
},
{
Key: keyPrefix + "/dir2/",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "/dir2/e.txt"},
},
},
{Key: keyPrefix + "/f.txt"},
},
wantPaged: []listResult{
{
Key: keyPrefix + "/dir1/",
IsDir: true,
},
{
Key: keyPrefix + "/dir2/",
IsDir: true,
},
{Key: keyPrefix + "/f.txt"},
},
wantAfterDel: []listResult{
{
Key: keyPrefix + "/dir1/",
IsDir: true,
},
{Key: keyPrefix + "/f.txt"},
},
},
{
name: "backslash",
delim: "\\",
wantFlat: []listResult{
{Key: keyPrefix + "\\dir1\\a.txt"},
{Key: keyPrefix + "\\dir1\\b.txt"},
{Key: keyPrefix + "\\dir1\\subdir\\c.txt"},
{Key: keyPrefix + "\\dir1\\subdir\\d.txt"},
{Key: keyPrefix + "\\dir2\\e.txt"},
{Key: keyPrefix + "\\f.txt"},
},
wantRecursive: []listResult{
{
Key: keyPrefix + "\\dir1\\",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "\\dir1\\a.txt"},
{Key: keyPrefix + "\\dir1\\b.txt"},
{
Key: keyPrefix + "\\dir1\\subdir\\",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "\\dir1\\subdir\\c.txt"},
{Key: keyPrefix + "\\dir1\\subdir\\d.txt"},
},
},
},
},
{
Key: keyPrefix + "\\dir2\\",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "\\dir2\\e.txt"},
},
},
{Key: keyPrefix + "\\f.txt"},
},
wantPaged: []listResult{
{
Key: keyPrefix + "\\dir1\\",
IsDir: true,
},
{
Key: keyPrefix + "\\dir2\\",
IsDir: true,
},
{Key: keyPrefix + "\\f.txt"},
},
wantAfterDel: []listResult{
{
Key: keyPrefix + "\\dir1\\",
IsDir: true,
},
{Key: keyPrefix + "\\f.txt"},
},
},
{
name: "abc",
delim: "abc",
wantFlat: []listResult{
{Key: keyPrefix + "abcdir1abca.txt"},
{Key: keyPrefix + "abcdir1abcb.txt"},
{Key: keyPrefix + "abcdir1abcsubdirabcc.txt"},
{Key: keyPrefix + "abcdir1abcsubdirabcd.txt"},
{Key: keyPrefix + "abcdir2abce.txt"},
{Key: keyPrefix + "abcf.txt"},
},
wantRecursive: []listResult{
{
Key: keyPrefix + "abcdir1abc",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "abcdir1abca.txt"},
{Key: keyPrefix + "abcdir1abcb.txt"},
{
Key: keyPrefix + "abcdir1abcsubdirabc",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "abcdir1abcsubdirabcc.txt"},
{Key: keyPrefix + "abcdir1abcsubdirabcd.txt"},
},
},
},
},
{
Key: keyPrefix + "abcdir2abc",
IsDir: true,
Sub: []listResult{
{Key: keyPrefix + "abcdir2abce.txt"},
},
},
{Key: keyPrefix + "abcf.txt"},
},
wantPaged: []listResult{
{
Key: keyPrefix + "abcdir1abc",
IsDir: true,
},
{
Key: keyPrefix + "abcdir2abc",
IsDir: true,
},
{Key: keyPrefix + "abcf.txt"},
},
wantAfterDel: []listResult{
{
Key: keyPrefix + "abcdir1abc",
IsDir: true,
},
{Key: keyPrefix + "abcf.txt"},
},
},
}
ctx := context.Background()
// Creates blobs for sub-tests below.
// We only create the blobs once, for efficiency and because there's
// no guarantee that after we create them they will be immediately returned
// from List. The very first time the test is run against a Bucket, it may be
// flaky due to this race.
init := func(t *testing.T, delim string) (driver.Bucket, *blob.Bucket, func()) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
// See if the blobs are already there.
prefix := keyPrefix + delim
iter := b.List(&blob.ListOptions{Prefix: prefix})
found := iterToSetOfKeys(ctx, t, iter)
for _, keyParts := range keys {
key := prefix + strings.Join(keyParts, delim)
if !found[key] {
if err := b.WriteAll(ctx, key, content, nil); err != nil {
b.Close()
t.Fatal(err)
}
}
}
return drv, b, func() { b.Close(); h.Close() }
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
drv, b, done := init(t, tc.delim)
defer done()
// Fetch without using delimiter.
got, err := doList(ctx, b, keyPrefix+tc.delim, "", true)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(got, tc.wantFlat); diff != "" {
t.Errorf("with no delimiter, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantFlat, diff)
}
// Fetch using delimiter, recursively.
got, err = doList(ctx, b, keyPrefix+tc.delim, tc.delim, true)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(got, tc.wantRecursive); diff != "" {
t.Errorf("with delimiter, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantRecursive, diff)
}
// Test pagination via driver.ListPaged.
var nextPageToken []byte
got = nil
for {
page, err := drv.ListPaged(ctx, &driver.ListOptions{
Prefix: keyPrefix + tc.delim,
Delimiter: tc.delim,
PageSize: 1,
PageToken: nextPageToken,
})
if err != nil {
t.Fatal(err)
}
if len(page.Objects) > 1 {
t.Errorf("got %d objects on a page, want 0 or 1", len(page.Objects))
}
for _, obj := range page.Objects {
got = append(got, listResult{
Key: obj.Key,
IsDir: obj.IsDir,
})
}
if len(page.NextPageToken) == 0 {
break
}
nextPageToken = page.NextPageToken
}
if diff := cmp.Diff(got, tc.wantPaged); diff != "" {
t.Errorf("paged got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantPaged, diff)
}
// Delete dir2/e.txt and verify that dir2/ is no longer returned.
key := strings.Join(append([]string{keyPrefix}, "dir2", "e.txt"), tc.delim)
if err := b.Delete(ctx, key); err != nil {
t.Fatal(err)
}
// Attempt to restore dir2/e.txt at the end of the test for the next run.
defer func() {
_ = b.WriteAll(ctx, key, content, nil)
}()
got, err = doList(ctx, b, keyPrefix+tc.delim, tc.delim, false)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(got, tc.wantAfterDel); diff != "" {
t.Errorf("after delete, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantAfterDel, diff)
}
})
}
}
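// iterToSetOfKeys drains iter and returns the set of keys it yielded.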
func iterToSetOfKeys(ctx context.Context, t *testing.T, iter *blob.ListIterator) map[string]bool {
retval := map[string]bool{}
for {
if item, err := iter.Next(ctx); err == io.EOF {
break
} else if err != nil {
t.Fatal(err)
} else {
retval[item.Key] = true
}
}
return retval
}
// testRead tests the functionality of NewReader, NewRangeReader, and Reader.
func testRead(t *testing.T, newHarness HarnessMaker) {
const key = "blob-for-reading"
content := []byte("abcdefghijklmnopqrstuvwxyz")
contentSize := int64(len(content))
tests := []struct {
name string
key string
offset, length int64
want []byte
wantReadSize int64
wantErr bool
// set to true to skip creation of the object for
// tests where we expect an error without any actual
// read.
skipCreate bool
}{
{
name: "read of nonexistent key fails",
key: "key-does-not-exist",
length: -1,
wantErr: true,
},
{
name: "negative offset fails",
key: key,
offset: -1,
wantErr: true,
skipCreate: true,
},
{
name: "length 0 read",
key: key,
want: []byte{},
},
{
name: "read from positive offset to end",
key: key,
offset: 10,
length: -1,
want: content[10:],
wantReadSize: contentSize - 10,
},
{
name: "read a part in middle",
key: key,
offset: 10,
length: 5,
want: content[10:15],
wantReadSize: 5,
},
{
name: "read in full",
key: key,
length: -1,
want: content,
wantReadSize: contentSize,
},
{
name: "read in full with negative length not -1",
key: key,
length: -42,
want: content,
wantReadSize: contentSize,
},
}
ctx := context.Background()
// Creates a blob for sub-tests below.
init := func(t *testing.T, skipCreate bool) (*blob.Bucket, func()) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
if skipCreate {
return b, func() { b.Close(); h.Close() }
}
if err := b.WriteAll(ctx, key, content, nil); err != nil {
b.Close()
t.Fatal(err)
}
return b, func() {
_ = b.Delete(ctx, key)
b.Close()
h.Close()
}
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
b, done := init(t, tc.skipCreate)
defer done()
r, err := b.NewRangeReader(ctx, tc.key, tc.offset, tc.length, nil)
if (err != nil) != tc.wantErr {
t.Errorf("got err %v want error %v", err, tc.wantErr)
}
if err != nil {
return
}
defer r.Close()
// Make the buffer bigger than needed to make sure we actually only read
// the expected number of bytes.
got := make([]byte, tc.wantReadSize+10)
n, err := r.Read(got)
// EOF error is optional, see https://golang.org/pkg/io/#Reader.
if err != nil && err != io.EOF {
t.Errorf("unexpected error during read: %v", err)
}
if int64(n) != tc.wantReadSize {
t.Errorf("got read length %d want %d", n, tc.wantReadSize)
}
if !cmp.Equal(got[:tc.wantReadSize], tc.want) {
t.Errorf("got %q want %q", string(got), string(tc.want))
}
if r.Size() != contentSize {
t.Errorf("got size %d want %d", r.Size(), contentSize)
}
if r.ModTime().IsZero() {
t.Errorf("got zero mod time, want non-zero")
}
})
}
}
// testAttributes tests Attributes.
func testAttributes(t *testing.T, newHarness HarnessMaker) {
const (
key = "blob-for-attributes"
contentType = "text/plain"
cacheControl = "no-cache"
contentDisposition = "inline"
contentEncoding = "identity"
contentLanguage = "en"
)
content := []byte("Hello World!")
ctx := context.Background()
// Creates a blob for sub-tests below.
init := func(t *testing.T) (*blob.Bucket, func()) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
opts := &blob.WriterOptions{
ContentType: contentType,
CacheControl: cacheControl,
ContentDisposition: contentDisposition,
ContentEncoding: contentEncoding,
ContentLanguage: contentLanguage,
}
if err := b.WriteAll(ctx, key, content, opts); err != nil {
b.Close()
t.Fatal(err)
}
return b, func() {
_ = b.Delete(ctx, key)
b.Close()
h.Close()
}
}
b, done := init(t)
defer done()
_, err := b.Attributes(ctx, "not-found")
if err == nil {
t.Errorf("got nil want error")
} else if gcerrors.Code(err) != gcerrors.NotFound {
t.Errorf("got %v want NotFound error", err)
} else if !strings.Contains(err.Error(), "not-found") {
t.Errorf("got %v want error to include missing key", err)
}
a, err := b.Attributes(ctx, key)
if err != nil {
t.Fatalf("failed Attributes: %v", err)
}
// Also make a Reader so we can verify the subset of attributes
// that it exposes.
r, err := b.NewReader(ctx, key, nil)
if err != nil {
t.Fatalf("failed Attributes: %v", err)
}
if a.CacheControl != cacheControl {
t.Errorf("got CacheControl %q want %q", a.CacheControl, cacheControl)
}
if a.ContentDisposition != contentDisposition {
t.Errorf("got ContentDisposition %q want %q", a.ContentDisposition, contentDisposition)
}
if a.ContentEncoding != contentEncoding {
t.Errorf("got ContentEncoding %q want %q", a.ContentEncoding, contentEncoding)
}
if a.ContentLanguage != contentLanguage {
t.Errorf("got ContentLanguage %q want %q", a.ContentLanguage, contentLanguage)
}
if a.ContentType != contentType {
t.Errorf("got ContentType %q want %q", a.ContentType, contentType)
}
if r.ContentType() != contentType {
t.Errorf("got Reader.ContentType() %q want %q", r.ContentType(), contentType)
}
if a.Size != int64(len(content)) {
t.Errorf("got Size %d want %d", a.Size, len(content))
}
if r.Size() != int64(len(content)) {
t.Errorf("got Reader.Size() %d want %d", r.Size(), len(content))
}
r.Close()
t1 := a.ModTime
if err := b.WriteAll(ctx, key, content, nil); err != nil {
t.Fatal(err)
}
a2, err := b.Attributes(ctx, key)
if err != nil {
t.Errorf("failed Attributes#2: %v", err)
}
t2 := a2.ModTime
if t2.Before(t1) {
t.Errorf("ModTime %v is before %v", t2, t1)
}
}
// loadTestData loads test data, inlined using go-bindata.
func loadTestData(t testing.TB, name string) []byte {
data, err := Asset(name)
if err != nil {
t.Fatal(err)
}
return data
}
// testWrite tests the functionality of NewWriter and Writer.
func testWrite(t *testing.T, newHarness HarnessMaker) {
const key = "blob-for-writing"
const existingContent = "existing content"
smallText := loadTestData(t, "test-small.txt")
mediumHTML := loadTestData(t, "test-medium.html")
largeJpg := loadTestData(t, "test-large.jpg")
helloWorld := []byte("hello world")
helloWorldMD5 := md5.Sum(helloWorld)
tests := []struct {
name string
key string
exists bool
content []byte
contentType string
contentMD5 []byte
firstChunk int
wantContentType string
wantErr bool
wantReadErr bool // if wantErr is true, the Read after the failed write may fail with something other than NotFound
}{
{
name: "write to empty key fails",
wantErr: true,
wantReadErr: true, // read from empty key fails, but not always with NotFound
},
{
name: "no write then close results in empty blob",
key: key,
},
{
name: "no write then close results in empty blob, blob existed",
key: key,
},
{
name: "invalid ContentType fails",
key: key,
contentType: "application/octet/stream",
wantErr: true,
},
{
name: "ContentType is discovered if not provided",
key: key,
content: mediumHTML,
wantContentType: "text/html",
},
{
name: "write with explicit ContentType overrides discovery",
key: key,
content: mediumHTML,
contentType: "application/json",
wantContentType: "application/json",
},
{
name: "Content md5 match",
key: key,
content: helloWorld,
contentMD5: helloWorldMD5[:],
},
{
name: "Content md5 did not match",
key: key,
content: []byte("not hello world"),
contentMD5: helloWorldMD5[:],
wantErr: true,
},
{
name: "Content md5 did not match, blob existed",
exists: true,
key: key,
content: []byte("not hello world"),
contentMD5: helloWorldMD5[:],
wantErr: true,
},
{
name: "a small text file",
key: key,
content: smallText,
wantContentType: "text/html",
},
{
name: "a large jpg file",
key: key,
content: largeJpg,
wantContentType: "image/jpg",
},
{
name: "a large jpg file written in two chunks",
key: key,
firstChunk: 10,
content: largeJpg,
wantContentType: "image/jpg",
},
// TODO(issue #304): Fails for GCS.
/*
{
name: "ContentType is parsed and reformatted",
key: key,
content: []byte("foo"),
contentType: `FORM-DATA;name="foo"`,
wantContentType: `form-data; name=foo`,
},
*/
}
ctx := context.Background()
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// If the test wants the blob to already exist, write it.
if tc.exists {
if err := b.WriteAll(ctx, key, []byte(existingContent), nil); err != nil {
t.Fatal(err)
}
defer func() {
_ = b.Delete(ctx, key)
}()
}
// Write the content.
opts := &blob.WriterOptions{
ContentType: tc.contentType,
ContentMD5: tc.contentMD5[:],
}
w, err := b.NewWriter(ctx, tc.key, opts)
if err == nil {
if len(tc.content) > 0 {
if tc.firstChunk == 0 {
// Write the whole thing.
_, err = w.Write(tc.content)
} else {
// Write it in 2 chunks.
_, err = w.Write(tc.content[:tc.firstChunk])
if err == nil {
_, err = w.Write(tc.content[tc.firstChunk:])
}
}
}
if err == nil {
err = w.Close()
}
}
if (err != nil) != tc.wantErr {
t.Errorf("NewWriter or Close got err %v want error %v", err, tc.wantErr)
}
if err != nil {
// The write failed; verify that it had no effect.
buf, err := b.ReadAll(ctx, tc.key)
if tc.exists {
// Verify the previous content is still there.
if !bytes.Equal(buf, []byte(existingContent)) {
t.Errorf("Write failed as expected, but content doesn't match expected previous content; got \n%s\n want \n%s", string(buf), existingContent)
}
} else {
// Verify that the read fails with NotFound.
if err == nil {
t.Error("Write failed as expected, but Read after that didn't return an error")
} else if !tc.wantReadErr && gcerrors.Code(err) != gcerrors.NotFound {
t.Errorf("Write failed as expected, but Read after that didn't return the right error; got %v want NotFound", err)
} else if !strings.Contains(err.Error(), tc.key) {
t.Errorf("got %v want error to include missing key", err)
}
}
return
}
defer func() { _ = b.Delete(ctx, tc.key) }()
// Read it back.
buf, err := b.ReadAll(ctx, tc.key)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, tc.content) {
if len(buf) < 100 && len(tc.content) < 100 {
t.Errorf("read didn't match write; got \n%s\n want \n%s", string(buf), string(tc.content))
} else {
t.Error("read didn't match write, content too large to display")
}
}
})
}
}
// testCanceledWrite tests the functionality of canceling an in-progress write.
func testCanceledWrite(t *testing.T, newHarness HarnessMaker) {
const key = "blob-for-canceled-write"
content := []byte("hello world")
cancelContent := []byte("going to cancel")
tests := []struct {
description string
contentType string
exists bool
}{
{
// The write will be buffered in the portable type as part of
// ContentType detection, so the first call to the Driver will be Close.
description: "EmptyContentType",
},
{
// The write will be sent to the Driver, which may do its own
// internal buffering.
description: "NonEmptyContentType",
contentType: "text/plain",
},
{
description: "BlobExists",
exists: true,
},
// TODO(issue #482): Find a way to test that a chunked upload that's interrupted
// after some chunks are uploaded cancels correctly.
}
ctx := context.Background()
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
cancelCtx, cancel := context.WithCancel(ctx)
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
opts := &blob.WriterOptions{
ContentType: test.contentType,
}
// If the test wants the blob to already exist, write it.
if test.exists {
if err := b.WriteAll(ctx, key, content, opts); err != nil {
t.Fatal(err)
}
defer func() {
_ = b.Delete(ctx, key)
}()
}
// Create a writer with the context that we're going
// to cancel.
w, err := b.NewWriter(cancelCtx, key, opts)
if err != nil {
t.Fatal(err)
}
// Write the content.
if _, err := w.Write(cancelContent); err != nil {
t.Fatal(err)
}
// Verify that the previous content (if any) is still readable,
// because the write hasn't been Closed yet.
got, err := b.ReadAll(ctx, key)
if test.exists {
// The previous content should still be there.
if !cmp.Equal(got, content) {
t.Errorf("during unclosed write, got %q want %q", string(got), string(content))
}
} else {
// The read should fail; the write hasn't been Closed so the
// blob shouldn't exist.
if err == nil {
t.Error("wanted read to return an error when write is not yet Closed")
}
}
// Cancel the context to abort the write.
cancel()
// Close should return some kind of canceled context error.
// We can't verify the kind of error cleanly, so we just verify there's
// an error.
if err := w.Close(); err == nil {
t.Errorf("got Close error %v want canceled ctx error", err)
}
// Verify the write was truly aborted.
got, err = b.ReadAll(ctx, key)
if test.exists {
// The previous content should still be there.
if !cmp.Equal(got, content) {
t.Errorf("after canceled write, got %q want %q", string(got), string(content))
}
} else {
// The read should fail; the write was aborted so the
// blob shouldn't exist.
if err == nil {
t.Error("wanted read to return an error when write was canceled")
}
}
})
}
}
// testMetadata tests writing and reading the key/value metadata for a blob.
func testMetadata(t *testing.T, newHarness HarnessMaker) {
const key = "blob-for-metadata"
hello := []byte("hello")
weirdMetadata := map[string]string{}
for _, k := range escape.WeirdStrings {
weirdMetadata[k] = k
}
tests := []struct {
name string
metadata map[string]string
content []byte
contentType string
want map[string]string
wantErr bool
}{
{
name: "empty",
content: hello,
metadata: map[string]string{},
want: nil,
},
{
name: "empty key fails",
content: hello,
metadata: map[string]string{"": "empty key value"},
wantErr: true,
},
{
name: "duplicate case-insensitive key fails",
content: hello,
metadata: map[string]string{"abc": "foo", "aBc": "bar"},
wantErr: true,
},
{
name: "valid metadata",
content: hello,
metadata: map[string]string{
"key_a": "value-a",
"kEy_B": "value-b",
"key_c": "vAlUe-c",
},
want: map[string]string{
"key_a": "value-a",
"key_b": "value-b",
"key_c": "vAlUe-c",
},
},
{
name: "valid metadata with empty body",
content: nil,
metadata: map[string]string{"foo": "bar"},
want: map[string]string{"foo": "bar"},
},
{
name: "valid metadata with content type",
content: hello,
contentType: "text/plain",
metadata: map[string]string{"foo": "bar"},
want: map[string]string{"foo": "bar"},
},
{
name: "weird metadata keys",
content: hello,
metadata: weirdMetadata,
want: weirdMetadata,
},
{
name: "non-utf8 metadata key",
content: hello,
metadata: map[string]string{escape.NonUTF8String: "bar"},
wantErr: true,
},
{
name: "non-utf8 metadata value",
content: hello,
metadata: map[string]string{"foo": escape.NonUTF8String},
wantErr: true,
},
}
ctx := context.Background()
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
opts := &blob.WriterOptions{
Metadata: tc.metadata,
ContentType: tc.contentType,
}
err = b.WriteAll(ctx, key, hello, opts)
if (err != nil) != tc.wantErr {
t.Errorf("got error %v want error %v", err, tc.wantErr)
}
if err != nil {
return
}
defer func() {
_ = b.Delete(ctx, key)
}()
a, err := b.Attributes(ctx, key)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(a.Metadata, tc.want); diff != "" {
t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", a.Metadata, tc.want, diff)
}
})
}
}
// testMD5 tests reading MD5 hashes via List and Attributes.
func testMD5(t *testing.T, newHarness HarnessMaker) {
ctx := context.Background()
// Define two blobs with different content; we'll write them and then verify
// their returned MD5 hashes.
const aKey, bKey = "blob-for-md5-aaa", "blob-for-md5-bbb"
aContent, bContent := []byte("hello"), []byte("goodbye")
aMD5 := md5.Sum(aContent)
bMD5 := md5.Sum(bContent)
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Write the two blobs.
if err := b.WriteAll(ctx, aKey, aContent, nil); err != nil {
t.Fatal(err)
}
defer func() { _ = b.Delete(ctx, aKey) }()
if err := b.WriteAll(ctx, bKey, bContent, nil); err != nil {
t.Fatal(err)
}
defer func() { _ = b.Delete(ctx, bKey) }()
// Check the MD5 we get through Attributes. Note that it's always legal to
// return a nil MD5.
aAttr, err := b.Attributes(ctx, aKey)
if err != nil {
t.Fatal(err)
}
if aAttr.MD5 != nil && !bytes.Equal(aAttr.MD5, aMD5[:]) {
t.Errorf("got MD5\n%x\nwant\n%x", aAttr.MD5, aMD5)
}
bAttr, err := b.Attributes(ctx, bKey)
if err != nil {
t.Fatal(err)
}
if bAttr.MD5 != nil && !bytes.Equal(bAttr.MD5, bMD5[:]) {
t.Errorf("got MD5\n%x\nwant\n%x", bAttr.MD5, bMD5)
}
// Check the MD5 we get through List. Note that it's always legal to
// return a nil MD5.
iter := b.List(&blob.ListOptions{Prefix: "blob-for-md5-"})
obj, err := iter.Next(ctx)
if err != nil {
t.Fatal(err)
}
if obj.Key != aKey {
t.Errorf("got name %q want %q", obj.Key, aKey)
}
if obj.MD5 != nil && !bytes.Equal(obj.MD5, aMD5[:]) {
t.Errorf("got MD5\n%x\nwant\n%x", obj.MD5, aMD5)
}
obj, err = iter.Next(ctx)
if err != nil {
t.Fatal(err)
}
if obj.Key != bKey {
t.Errorf("got name %q want %q", obj.Key, bKey)
}
if obj.MD5 != nil && !bytes.Equal(obj.MD5, bMD5[:]) {
t.Errorf("got MD5\n%x\nwant\n%x", obj.MD5, bMD5)
}
}
// testCopy tests the functionality of Copy.
func testCopy(t *testing.T, newHarness HarnessMaker) {
const (
srcKey = "blob-for-copying-src"
dstKey = "blob-for-copying-dest"
dstKeyExists = "blob-for-copying-dest-exists"
contentType = "text/plain"
cacheControl = "no-cache"
contentDisposition = "inline"
contentEncoding = "identity"
contentLanguage = "en"
)
var contents = []byte("Hello World")
ctx := context.Background()
t.Run("NonExistentSourceFails", func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
err = b.Copy(ctx, dstKey, "does-not-exist", nil)
if err == nil {
t.Errorf("got nil want error")
} else if gcerrors.Code(err) != gcerrors.NotFound {
t.Errorf("got %v want NotFound error", err)
} else if !strings.Contains(err.Error(), "does-not-exist") {
t.Errorf("got %v want error to include missing key", err)
}
})
t.Run("Works", func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Create the source blob.
wopts := &blob.WriterOptions{
ContentType: contentType,
CacheControl: cacheControl,
ContentDisposition: contentDisposition,
ContentEncoding: contentEncoding,
ContentLanguage: contentLanguage,
Metadata: map[string]string{"foo": "bar"},
}
if err := b.WriteAll(ctx, srcKey, contents, wopts); err != nil {
t.Fatal(err)
}
// Grab its attributes to compare to the copy's attributes later.
wantAttr, err := b.Attributes(ctx, srcKey)
if err != nil {
t.Fatal(err)
}
wantAttr.ModTime = time.Time{} // don't compare this field
// Create another blob that we're going to overwrite.
if err := b.WriteAll(ctx, dstKeyExists, []byte("clobber me"), nil); err != nil {
t.Fatal(err)
}
// Copy the source to the destination.
if err := b.Copy(ctx, dstKey, srcKey, nil); err != nil {
t.Errorf("got unexpected error copying blob: %v", err)
}
// Read the copy.
got, err := b.ReadAll(ctx, dstKey)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(got, contents) {
t.Errorf("got %q want %q", string(got), string(contents))
}
// Verify attributes of the copy.
gotAttr, err := b.Attributes(ctx, dstKey)
if err != nil {
t.Fatal(err)
}
gotAttr.ModTime = time.Time{} // don't compare this field
if diff := cmp.Diff(gotAttr, wantAttr, cmpopts.IgnoreUnexported(blob.Attributes{})); diff != "" {
t.Errorf("got %v want %v diff %s", gotAttr, wantAttr, diff)
}
// Copy the source to the second destination, where there's an existing blob.
// It should be overwritten.
if err := b.Copy(ctx, dstKeyExists, srcKey, nil); err != nil {
t.Errorf("got unexpected error copying blob: %v", err)
}
// Read the copy.
got, err = b.ReadAll(ctx, dstKeyExists)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(got, contents) {
t.Errorf("got %q want %q", string(got), string(contents))
}
// Verify attributes of the copy.
gotAttr, err = b.Attributes(ctx, dstKeyExists)
if err != nil {
t.Fatal(err)
}
gotAttr.ModTime = time.Time{} // don't compare this field
if diff := cmp.Diff(gotAttr, wantAttr, cmpopts.IgnoreUnexported(blob.Attributes{})); diff != "" {
t.Errorf("got %v want %v diff %s", gotAttr, wantAttr, diff)
}
})
}
// testDelete tests the functionality of Delete.
func testDelete(t *testing.T, newHarness HarnessMaker) {
const key = "blob-for-deleting"
ctx := context.Background()
t.Run("NonExistentFails", func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
err = b.Delete(ctx, "does-not-exist")
if err == nil {
t.Errorf("got nil want error")
} else if gcerrors.Code(err) != gcerrors.NotFound {
t.Errorf("got %v want NotFound error", err)
} else if !strings.Contains(err.Error(), "does-not-exist") {
t.Errorf("got %v want error to include missing key", err)
}
})
t.Run("Works", func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Create the blob.
if err := b.WriteAll(ctx, key, []byte("Hello world"), nil); err != nil {
t.Fatal(err)
}
// Delete it.
if err := b.Delete(ctx, key); err != nil {
t.Errorf("got unexpected error deleting blob: %v", err)
}
// Subsequent read fails with NotFound.
_, err = b.NewReader(ctx, key, nil)
if err == nil {
t.Errorf("read after delete got nil, want error")
} else if gcerrors.Code(err) != gcerrors.NotFound {
t.Errorf("read after delete want NotFound error, got %v", err)
} else if !strings.Contains(err.Error(), key) {
t.Errorf("got %v want error to include missing key", err)
}
// Subsequent delete also fails.
err = b.Delete(ctx, key)
if err == nil {
t.Errorf("delete after delete got nil, want error")
} else if gcerrors.Code(err) != gcerrors.NotFound {
t.Errorf("delete after delete got %v, want NotFound error", err)
} else if !strings.Contains(err.Error(), key) {
t.Errorf("got %v want error to include missing key", err)
}
})
}
// testConcurrentWriteAndRead tests that concurrent writing to multiple blob
// keys and concurrent reading from multiple blob keys works.
func testConcurrentWriteAndRead(t *testing.T, newHarness HarnessMaker) {
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Prepare data. Each of the numKeys blobs has dataSize bytes, with each byte
// set to the numeric key index. For example, the blob at "key0" consists of
// all dataSize bytes set to 0.
const numKeys = 20
const dataSize = 4 * 1024
keyData := make(map[int][]byte)
for k := 0; k < numKeys; k++ {
data := make([]byte, dataSize)
for i := 0; i < dataSize; i++ {
data[i] = byte(k)
}
keyData[k] = data
}
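// blobName maps a numeric key index to the blob key used for it.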
blobName := func(k int) string {
return fmt.Sprintf("key%d", k)
}
var wg sync.WaitGroup
// Write all blobs concurrently.
for k := 0; k < numKeys; k++ {
wg.Add(1)
go func(key int) {
if err := b.WriteAll(ctx, blobName(key), keyData[key], nil); err != nil {
t.Error(err) // t.Fatal must not be called from a non-test goroutine; t.Error lets wg.Done still run
}
wg.Done()
}(k)
defer b.Delete(ctx, blobName(k))
}
wg.Wait()
// Read all blobs concurrently and verify that they contain the expected data.
for k := 0; k < numKeys; k++ {
wg.Add(1)
go func(key int) {
buf, err := b.ReadAll(ctx, blobName(key))
if err != nil {
// t.Fatal must not be called from a non-test goroutine; use t.Error so wg.Done still runs.
t.Error(err)
} else if !bytes.Equal(buf, keyData[key]) {
t.Errorf("read data mismatch for key %d", key)
}
wg.Done()
}(k)
}
wg.Wait()
}
// testKeys tests a variety of weird keys.
func testKeys(t *testing.T, newHarness HarnessMaker) {
const keyPrefix = "weird-keys"
content := []byte("hello")
ctx := context.Background()
t.Run("non-UTF8 fails", func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Write the blob.
key := keyPrefix + escape.NonUTF8String
if err := b.WriteAll(ctx, key, content, nil); err == nil {
t.Error("got nil error, expected error for using non-UTF8 string as key")
}
})
for description, key := range escape.WeirdStrings {
t.Run(description, func(t *testing.T) {
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Write the blob.
key = keyPrefix + key
if err := b.WriteAll(ctx, key, content, nil); err != nil {
t.Fatal(err)
}
defer func() {
err := b.Delete(ctx, key)
if err != nil {
t.Error(err)
}
}()
// Verify read works.
got, err := b.ReadAll(ctx, key)
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(got, content) {
t.Errorf("got %q want %q", string(got), string(content))
}
// Verify Attributes works.
_, err = b.Attributes(ctx, key)
if err != nil {
t.Error(err)
}
// Verify SignedURL works.
url, err := b.SignedURL(ctx, key, nil)
if gcerrors.Code(err) != gcerrors.Unimplemented {
if err != nil {
t.Error(err)
}
client := h.HTTPClient()
if client == nil {
t.Error("can't verify SignedURL, Harness.HTTPClient() returned nil")
}
resp, err := client.Get(url)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
t.Errorf("got status code %d, want 200", resp.StatusCode)
}
got, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(got, content) {
t.Errorf("got body %q, want %q", string(got), string(content))
}
}
})
}
}
// testSignedURL tests the functionality of SignedURL.
func testSignedURL(t *testing.T, newHarness HarnessMaker) {
const key = "blob-for-signing"
const contents = "hello world"
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Verify that a negative Expiry gives an error. This is enforced in the
// portable type, so works regardless of driver support.
_, err = b.SignedURL(ctx, key, &blob.SignedURLOptions{Expiry: -1 * time.Minute})
if err == nil {
t.Error("got nil error, expected error for negative SignedURLOptions.Expiry")
}
// Generate real signed URLs for GET, GET with the query params removed, PUT, and DELETE.
getURL, err := b.SignedURL(ctx, key, nil)
if err != nil {
if gcerrors.Code(err) == gcerrors.Unimplemented {
t.Skipf("SignedURL not supported")
return
}
t.Fatal(err)
} else if getURL == "" {
t.Fatal("got empty GET url")
}
// Copy getURL, but remove all query params. This URL should not be allowed
// to GET since the client is unauthorized.
getURLNoParamsURL, err := url.Parse(getURL)
if err != nil {
t.Fatalf("failed to parse getURL: %v", err)
}
getURLNoParamsURL.RawQuery = ""
getURLNoParams := getURLNoParamsURL.String()
putURL, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{Method: http.MethodPut})
if err != nil {
t.Fatal(err)
} else if putURL == "" {
t.Fatal("got empty PUT url")
}
deleteURL, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{Method: http.MethodDelete})
if err != nil {
t.Fatal(err)
} else if deleteURL == "" {
t.Fatal("got empty DELETE url")
}
client := h.HTTPClient()
if client == nil {
t.Fatal("can't verify SignedURL, Harness.HTTPClient() returned nil")
}
// PUT the blob. Try with all URLs, only putURL should work.
for _, test := range []struct {
urlMethod string
url string
wantSuccess bool
}{
{http.MethodGet, getURL, false},
{http.MethodDelete, deleteURL, false},
{http.MethodPut, putURL, true},
} {
req, err := http.NewRequest(http.MethodPut, test.url, strings.NewReader(contents))
if err != nil {
t.Fatalf("failed to create PUT HTTP request using %s URL: %v", test.urlMethod, err)
}
if resp, err := client.Do(req); err != nil {
t.Fatalf("PUT failed with %s URL: %v", test.urlMethod, err)
} else {
defer resp.Body.Close()
success := resp.StatusCode >= 200 && resp.StatusCode < 300
if success != test.wantSuccess {
t.Errorf("PUT with %s URL got status code %d, want 2xx? %v", test.urlMethod, resp.StatusCode, test.wantSuccess)
gotBody, _ := ioutil.ReadAll(resp.Body)
t.Errorf("%s", gotBody)
}
}
}
// GET it. Try with all URLs, only getURL should work.
for _, test := range []struct {
urlMethod string
url string
wantSuccess bool
}{
{http.MethodDelete, deleteURL, false},
{http.MethodPut, putURL, false},
{http.MethodGet, getURLNoParams, false},
{http.MethodGet, getURL, true},
} {
if resp, err := client.Get(test.url); err != nil {
t.Fatalf("GET with %s URL failed: %v", test.urlMethod, err)
} else {
defer resp.Body.Close()
success := resp.StatusCode >= 200 && resp.StatusCode < 300
if success != test.wantSuccess {
t.Errorf("GET with %s URL got status code %d, want 2xx? %v", test.urlMethod, resp.StatusCode, test.wantSuccess)
gotBody, _ := ioutil.ReadAll(resp.Body)
t.Errorf("%s", gotBody)
} else if success {
gotBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("GET with %s URL failed to read response body: %v", test.urlMethod, err)
} else if gotBodyStr := string(gotBody); gotBodyStr != contents {
t.Errorf("GET with %s URL got body %q, want %q", test.urlMethod, gotBodyStr, contents)
}
}
}
}
// DELETE it. Try with all URLs, only deleteURL should work.
for _, test := range []struct {
urlMethod string
url string
wantSuccess bool
}{
{http.MethodGet, getURL, false},
{http.MethodPut, putURL, false},
{http.MethodDelete, deleteURL, true},
} {
req, err := http.NewRequest(http.MethodDelete, test.url, nil)
if err != nil {
t.Fatalf("failed to create DELETE HTTP request using %s URL: %v", test.urlMethod, err)
}
if resp, err := client.Do(req); err != nil {
t.Fatalf("DELETE with %s URL failed: %v", test.urlMethod, err)
} else {
defer resp.Body.Close()
success := resp.StatusCode >= 200 && resp.StatusCode < 300
if success != test.wantSuccess {
t.Fatalf("DELETE with %s URL got status code %d, want 2xx? %v", test.urlMethod, resp.StatusCode, test.wantSuccess)
gotBody, _ := ioutil.ReadAll(resp.Body)
t.Errorf(string(gotBody))
}
}
}
// GET should fail now that the blob has been deleted.
if resp, err := client.Get(getURL); err != nil {
t.Errorf("GET after DELETE failed: %v", err)
} else {
defer resp.Body.Close()
if resp.StatusCode != 404 {
t.Errorf("GET after DELETE got status code %d, want 404", resp.StatusCode)
gotBody, _ := ioutil.ReadAll(resp.Body)
t.Errorf("%s", gotBody)
}
}
}
// testAs tests the various As functions, using AsTest.
func testAs(t *testing.T, newHarness HarnessMaker, st AsTest) {
const (
dir = "mydir"
key = dir + "/as-test"
copyKey = dir + "/as-test-copy"
)
var content = []byte("hello world")
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
drv, err := h.MakeDriver(ctx)
if err != nil {
t.Fatal(err)
}
b := blob.NewBucket(drv)
defer b.Close()
// Verify Bucket.As.
if err := st.BucketCheck(b); err != nil {
t.Error(err)
}
// Create a blob, using the provided callback.
if err := b.WriteAll(ctx, key, content, &blob.WriterOptions{BeforeWrite: st.BeforeWrite}); err != nil {
t.Error(err)
}
defer func() { _ = b.Delete(ctx, key) }()
// Verify Attributes.As.
attrs, err := b.Attributes(ctx, key)
if err != nil {
t.Fatal(err)
}
if err := st.AttributesCheck(attrs); err != nil {
t.Error(err)
}
// Verify Reader.As.
r, err := b.NewReader(ctx, key, &blob.ReaderOptions{BeforeRead: st.BeforeRead})
if err != nil {
t.Fatal(err)
}
defer r.Close()
if err := st.ReaderCheck(r); err != nil {
t.Error(err)
}
// Verify ListObject.As for the directory.
iter := b.List(&blob.ListOptions{Prefix: dir, Delimiter: "/", BeforeList: st.BeforeList})
found := false
for {
obj, err := iter.Next(ctx)
if err == io.EOF {
break
}
if found {
t.Fatal("got a second object returned from List, only wanted one")
}
found = true
if err != nil {
log.Fatal(err)
}
if err := st.ListObjectCheck(obj); err != nil {
t.Error(err)
}
}
// Verify ListObject.As for the blob.
iter = b.List(&blob.ListOptions{Prefix: key, BeforeList: st.BeforeList})
found = false
for {
obj, err := iter.Next(ctx)
if err == io.EOF {
break
}
if found {
t.Fatal("got a second object returned from List, only wanted one")
}
found = true
if err != nil {
log.Fatal(err)
}
if err := st.ListObjectCheck(obj); err != nil {
t.Error(err)
}
}
_, gotErr := b.NewReader(ctx, "key-does-not-exist", nil)
if gotErr == nil {
t.Fatalf("got nil error from NewReader for nonexistent key, want an error")
}
if err := st.ErrorCheck(b, gotErr); err != nil {
t.Error(err)
}
// Copy the blob, using the provided callback.
if err := b.Copy(ctx, copyKey, key, &blob.CopyOptions{BeforeCopy: st.BeforeCopy}); err != nil {
t.Error(err)
} else {
defer func() { _ = b.Delete(ctx, copyKey) }()
}
}
func benchmarkRead(b *testing.B, bkt *blob.Bucket) {
ctx := context.Background()
const key = "readbenchmark-blob"
content := loadTestData(b, "test-large.jpg")
if err := bkt.WriteAll(ctx, key, content, nil); err != nil {
b.Fatal(err)
}
defer func() {
_ = bkt.Delete(ctx, key)
}()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
buf, err := bkt.ReadAll(ctx, key)
if err != nil {
b.Error(err)
}
if !bytes.Equal(buf, content) {
b.Error("read didn't match write")
}
}
})
}
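// benchmarkWriteReadDelete measures a write/read/delete cycle per iteration, with each parallel goroutine using its own key.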
func benchmarkWriteReadDelete(b *testing.B, bkt *blob.Bucket) {
ctx := context.Background()
const baseKey = "writereaddeletebenchmark-blob-"
content := loadTestData(b, "test-large.jpg")
var nextID uint32
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
key := fmt.Sprintf("%s%d", baseKey, atomic.AddUint32(&nextID, 1))
for pb.Next() {
if err := bkt.WriteAll(ctx, key, content, nil); err != nil {
b.Error(err)
continue
}
buf, err := bkt.ReadAll(ctx, key)
if err != nil {
b.Error(err)
}
if !bytes.Equal(buf, content) {
b.Error("read didn't match write")
}
if err := bkt.Delete(ctx, key); err != nil {
b.Error(err)
continue
}
}
})
}
| 1 | 19,785 | The "good" and "bad" descriptions confused me a bit. Maybe `allowedContentType` and `disallowedContentType`? Not sure if that's better. | google-go-cloud | go |
@@ -273,7 +273,7 @@ public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
}
if (!GET_ALL_SESSIONS.equals(command.getName())
&& !NEW_SESSION.equals(command.getName())) {
- throw new SessionNotFoundException("Session ID is null");
+ throw new SessionNotFoundException("Session ID is null. Using WebDriver after calling quit()?");
}
}
| 1 | /*
Copyright 2007-2011 Selenium committers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.selenium.remote;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.NoHttpResponseException;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import org.openqa.selenium.UnsupportedCommandException;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.logging.LocalLogs;
import org.openqa.selenium.logging.LogEntry;
import org.openqa.selenium.logging.LogType;
import org.openqa.selenium.logging.NeedsLocalLogs;
import org.openqa.selenium.logging.profiler.HttpProfilerLogEntry;
import org.openqa.selenium.remote.internal.HttpClientFactory;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.BindException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.Map;
import static org.apache.http.protocol.ExecutionContext.HTTP_TARGET_HOST;
import static org.openqa.selenium.remote.DriverCommand.*;
public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
private static final int MAX_REDIRECTS = 10;
private final HttpHost targetHost;
private final URL remoteServer;
private final Map<String, CommandInfo> nameToUrl;
private final HttpClient client;
private final ErrorCodes errorCodes = new ErrorCodes();
private static HttpClientFactory httpClientFactory;
private LocalLogs logs = LocalLogs.getNullLogger();
public HttpCommandExecutor(URL addressOfRemoteServer) {
this(ImmutableMap.<String, CommandInfo>of(), addressOfRemoteServer);
}
public HttpCommandExecutor(Map<String, CommandInfo> additionalCommands, URL addressOfRemoteServer) {
try {
remoteServer = addressOfRemoteServer == null ?
new URL(System.getProperty("webdriver.remote.server", "http://localhost:4444/wd/hub")) :
addressOfRemoteServer;
} catch (MalformedURLException e) {
throw new WebDriverException(e);
}
HttpParams params = new BasicHttpParams();
// Use the JRE default for the socket linger timeout.
params.setParameter(CoreConnectionPNames.SO_LINGER, -1);
HttpClientParams.setRedirecting(params, false);
synchronized (HttpCommandExecutor.class) {
if (httpClientFactory == null) {
httpClientFactory = new HttpClientFactory();
}
}
client = httpClientFactory.getHttpClient();
if (addressOfRemoteServer != null && addressOfRemoteServer.getUserInfo() != null) {
// Use HTTP Basic auth
UsernamePasswordCredentials credentials = new
UsernamePasswordCredentials(addressOfRemoteServer.getUserInfo());
((DefaultHttpClient) client).getCredentialsProvider().
setCredentials(AuthScope.ANY, credentials);
}
// Some machines claim "localhost.localdomain" is the same as "localhost".
// This assumption is not always true.
String host = remoteServer.getHost().replace(".localdomain", "");
targetHost = new HttpHost(
host, remoteServer.getPort(), remoteServer.getProtocol());
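// Map each command name to the HTTP method and URI template used to send it to the remote server.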
ImmutableMap.Builder<String, CommandInfo> builder = ImmutableMap.builder();
for (Map.Entry<String, CommandInfo> entry : additionalCommands.entrySet()) {
builder.put(entry.getKey(), entry.getValue());
}
builder
.put(GET_ALL_SESSIONS, get("/sessions"))
.put(NEW_SESSION, post("/session"))
.put(GET_CAPABILITIES, get("/session/:sessionId"))
.put(QUIT, delete("/session/:sessionId"))
.put(GET_CURRENT_WINDOW_HANDLE, get("/session/:sessionId/window_handle"))
.put(GET_WINDOW_HANDLES, get("/session/:sessionId/window_handles"))
.put(GET, post("/session/:sessionId/url"))
// The Alert API is still experimental and should not be used.
.put(GET_ALERT, get("/session/:sessionId/alert"))
.put(DISMISS_ALERT, post("/session/:sessionId/dismiss_alert"))
.put(ACCEPT_ALERT, post("/session/:sessionId/accept_alert"))
.put(GET_ALERT_TEXT, get("/session/:sessionId/alert_text"))
.put(SET_ALERT_VALUE, post("/session/:sessionId/alert_text"))
.put(GO_FORWARD, post("/session/:sessionId/forward"))
.put(GO_BACK, post("/session/:sessionId/back"))
.put(REFRESH, post("/session/:sessionId/refresh"))
.put(EXECUTE_SCRIPT, post("/session/:sessionId/execute"))
.put(EXECUTE_ASYNC_SCRIPT, post("/session/:sessionId/execute_async"))
.put(GET_CURRENT_URL, get("/session/:sessionId/url"))
.put(GET_TITLE, get("/session/:sessionId/title"))
.put(GET_PAGE_SOURCE, get("/session/:sessionId/source"))
.put(SCREENSHOT, get("/session/:sessionId/screenshot"))
.put(SET_BROWSER_VISIBLE, post("/session/:sessionId/visible"))
.put(IS_BROWSER_VISIBLE, get("/session/:sessionId/visible"))
.put(FIND_ELEMENT, post("/session/:sessionId/element"))
.put(FIND_ELEMENTS, post("/session/:sessionId/elements"))
.put(GET_ACTIVE_ELEMENT, post("/session/:sessionId/element/active"))
.put(FIND_CHILD_ELEMENT, post("/session/:sessionId/element/:id/element"))
.put(FIND_CHILD_ELEMENTS, post("/session/:sessionId/element/:id/elements"))
.put(CLICK_ELEMENT, post("/session/:sessionId/element/:id/click"))
.put(CLEAR_ELEMENT, post("/session/:sessionId/element/:id/clear"))
.put(SUBMIT_ELEMENT, post("/session/:sessionId/element/:id/submit"))
.put(GET_ELEMENT_TEXT, get("/session/:sessionId/element/:id/text"))
.put(SEND_KEYS_TO_ELEMENT, post("/session/:sessionId/element/:id/value"))
.put(UPLOAD_FILE, post("/session/:sessionId/file"))
.put(GET_ELEMENT_VALUE, get("/session/:sessionId/element/:id/value"))
.put(GET_ELEMENT_TAG_NAME, get("/session/:sessionId/element/:id/name"))
.put(IS_ELEMENT_SELECTED, get("/session/:sessionId/element/:id/selected"))
.put(IS_ELEMENT_ENABLED, get("/session/:sessionId/element/:id/enabled"))
.put(IS_ELEMENT_DISPLAYED, get("/session/:sessionId/element/:id/displayed"))
.put(HOVER_OVER_ELEMENT, post("/session/:sessionId/element/:id/hover"))
.put(GET_ELEMENT_LOCATION, get("/session/:sessionId/element/:id/location"))
.put(GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW,
get("/session/:sessionId/element/:id/location_in_view"))
.put(GET_ELEMENT_SIZE, get("/session/:sessionId/element/:id/size"))
.put(GET_ELEMENT_ATTRIBUTE, get("/session/:sessionId/element/:id/attribute/:name"))
.put(ELEMENT_EQUALS, get("/session/:sessionId/element/:id/equals/:other"))
.put(GET_ALL_COOKIES, get("/session/:sessionId/cookie"))
.put(ADD_COOKIE, post("/session/:sessionId/cookie"))
.put(DELETE_ALL_COOKIES, delete("/session/:sessionId/cookie"))
.put(DELETE_COOKIE, delete("/session/:sessionId/cookie/:name"))
.put(SWITCH_TO_FRAME, post("/session/:sessionId/frame"))
.put(SWITCH_TO_WINDOW, post("/session/:sessionId/window"))
.put(GET_WINDOW_SIZE, get("/session/:sessionId/window/:windowHandle/size"))
.put(GET_WINDOW_POSITION, get("/session/:sessionId/window/:windowHandle/position"))
.put(SET_WINDOW_SIZE, post("/session/:sessionId/window/:windowHandle/size"))
.put(SET_WINDOW_POSITION, post("/session/:sessionId/window/:windowHandle/position"))
.put(MAXIMIZE_WINDOW, post("/session/:sessionId/window/:windowHandle/maximize"))
.put(CLOSE, delete("/session/:sessionId/window"))
.put(DRAG_ELEMENT, post("/session/:sessionId/element/:id/drag"))
.put(GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
get("/session/:sessionId/element/:id/css/:propertyName"))
.put(IMPLICITLY_WAIT, post("/session/:sessionId/timeouts/implicit_wait"))
.put(SET_SCRIPT_TIMEOUT, post("/session/:sessionId/timeouts/async_script"))
.put(SET_TIMEOUT, post("/session/:sessionId/timeouts"))
.put(EXECUTE_SQL, post("/session/:sessionId/execute_sql"))
.put(GET_LOCATION, get("/session/:sessionId/location"))
.put(SET_LOCATION, post("/session/:sessionId/location"))
.put(GET_APP_CACHE_STATUS, get("/session/:sessionId/application_cache/status"))
.put(IS_BROWSER_ONLINE, get("/session/:sessionId/browser_connection"))
.put(SET_BROWSER_ONLINE, post("/session/:sessionId/browser_connection"))
// TODO (user): Would it be better to combine this command with
// GET_LOCAL_STORAGE_SIZE?
.put(GET_LOCAL_STORAGE_ITEM, get("/session/:sessionId/local_storage/key/:key"))
.put(REMOVE_LOCAL_STORAGE_ITEM, delete("/session/:sessionId/local_storage/key/:key"))
.put(GET_LOCAL_STORAGE_KEYS, get("/session/:sessionId/local_storage"))
.put(SET_LOCAL_STORAGE_ITEM, post("/session/:sessionId/local_storage"))
.put(CLEAR_LOCAL_STORAGE, delete("/session/:sessionId/local_storage"))
.put(GET_LOCAL_STORAGE_SIZE, get("/session/:sessionId/local_storage/size"))
// TODO (user): Would it be better to combine this command with
// GET_SESSION_STORAGE_SIZE?
.put(GET_SESSION_STORAGE_ITEM, get("/session/:sessionId/session_storage/key/:key"))
.put(REMOVE_SESSION_STORAGE_ITEM, delete("/session/:sessionId/session_storage/key/:key"))
.put(GET_SESSION_STORAGE_KEYS, get("/session/:sessionId/session_storage"))
.put(SET_SESSION_STORAGE_ITEM, post("/session/:sessionId/session_storage"))
.put(CLEAR_SESSION_STORAGE, delete("/session/:sessionId/session_storage"))
.put(GET_SESSION_STORAGE_SIZE, get("/session/:sessionId/session_storage/size"))
.put(GET_SCREEN_ORIENTATION, get("/session/:sessionId/orientation"))
.put(SET_SCREEN_ORIENTATION, post("/session/:sessionId/orientation"))
// Interactions-related commands.
.put(CLICK, post("/session/:sessionId/click"))
.put(DOUBLE_CLICK, post("/session/:sessionId/doubleclick"))
.put(MOUSE_DOWN, post("/session/:sessionId/buttondown"))
.put(MOUSE_UP, post("/session/:sessionId/buttonup"))
.put(MOVE_TO, post("/session/:sessionId/moveto"))
.put(SEND_KEYS_TO_ACTIVE_ELEMENT, post("/session/:sessionId/keys"))
// IME related commands.
.put(IME_GET_AVAILABLE_ENGINES, get("/session/:sessionId/ime/available_engines"))
.put(IME_GET_ACTIVE_ENGINE, get("/session/:sessionId/ime/active_engine"))
.put(IME_IS_ACTIVATED, get("/session/:sessionId/ime/activated"))
.put(IME_DEACTIVATE, post("/session/:sessionId/ime/deactivate"))
.put(IME_ACTIVATE_ENGINE, post("/session/:sessionId/ime/activate"))
// Advanced Touch API commands
// TODO(berrada): Refactor single tap with mouse click.
.put(TOUCH_SINGLE_TAP, post("/session/:sessionId/touch/click"))
.put(TOUCH_DOWN, post("/session/:sessionId/touch/down"))
.put(TOUCH_UP, post("/session/:sessionId/touch/up"))
.put(TOUCH_MOVE, post("/session/:sessionId/touch/move"))
.put(TOUCH_SCROLL, post("/session/:sessionId/touch/scroll"))
.put(TOUCH_DOUBLE_TAP, post("/session/:sessionId/touch/doubleclick"))
.put(TOUCH_LONG_PRESS, post("/session/:sessionId/touch/longclick"))
.put(TOUCH_FLICK, post("/session/:sessionId/touch/flick"))
.put(GET_LOG, post("/session/:sessionId/log"))
.put(GET_AVAILABLE_LOG_TYPES, get("/session/:sessionId/log/types"))
.put(STATUS, get("/status"));
nameToUrl = builder.build();
}
public void setLocalLogs(LocalLogs logs) {
this.logs = logs;
}
private void log(String logType, LogEntry entry) {
logs.addEntry(logType, entry);
}
public URL getAddressOfRemoteServer() {
return remoteServer;
}
public Response execute(Command command) throws IOException {
HttpContext context = new BasicHttpContext();
if (command.getSessionId() == null) {
if (QUIT.equals(command.getName())) {
return new Response();
}
if (!GET_ALL_SESSIONS.equals(command.getName())
&& !NEW_SESSION.equals(command.getName())) {
throw new SessionNotFoundException("Session ID is null");
}
}
CommandInfo info = nameToUrl.get(command.getName());
try {
HttpUriRequest httpMethod = info.getMethod(remoteServer, command);
setAcceptHeader(httpMethod);
if (httpMethod instanceof HttpPost) {
String payload = new BeanToJsonConverter().convert(command.getParameters());
((HttpPost) httpMethod).setEntity(new StringEntity(payload, "utf-8"));
httpMethod.addHeader("Content-Type", "application/json; charset=utf-8");
}
// Do not allow web proxy caches to cache responses to "get" commands
if (httpMethod instanceof HttpGet) {
httpMethod.addHeader("Cache-Control", "no-cache");
}
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), true));
HttpResponse response = fallBackExecute(context, httpMethod);
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), false));
response = followRedirects(client, context, response, /* redirect count */0);
final EntityWithEncoding entityWithEncoding = new EntityWithEncoding(response.getEntity());
return createResponse(response, context, entityWithEncoding);
} catch (UnsupportedCommandException e) {
if (e.getMessage() == null || "".equals(e.getMessage())) {
throw new UnsupportedOperationException(
"No information from server. Command name was: " + command.getName(),
e.getCause());
}
throw e;
}
}
private HttpResponse fallBackExecute(HttpContext context, HttpUriRequest httpMethod)
throws IOException {
try {
return client.execute(targetHost, httpMethod, context);
} catch (BindException e) {
// If we get this, there's a chance we've used all the local ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
} catch (NoHttpResponseException e) {
// If we get this, there's a chance we've used all the remote ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
}
return client.execute(targetHost, httpMethod, context);
}
private void setAcceptHeader(HttpUriRequest httpMethod) {
httpMethod.addHeader("Accept", "application/json, image/png");
}
private HttpResponse followRedirects(
HttpClient client, HttpContext context, HttpResponse response, int redirectCount) {
if (!isRedirect(response)) {
return response;
}
try {
// Make sure that the previous connection is freed.
HttpEntity httpEntity = response.getEntity();
if (httpEntity != null) {
EntityUtils.consume(httpEntity);
}
} catch (IOException e) {
throw new WebDriverException(e);
}
if (redirectCount > MAX_REDIRECTS) {
throw new WebDriverException("Maximum number of redirects exceeded. Aborting");
}
String location = response.getFirstHeader("location").getValue();
URI uri;
try {
uri = buildUri(context, location);
HttpGet get = new HttpGet(uri);
setAcceptHeader(get);
HttpResponse newResponse = client.execute(targetHost, get, context);
return followRedirects(client, context, newResponse, redirectCount + 1);
} catch (URISyntaxException e) {
throw new WebDriverException(e);
} catch (ClientProtocolException e) {
throw new WebDriverException(e);
} catch (IOException e) {
throw new WebDriverException(e);
}
}
private URI buildUri(HttpContext context, String location) throws URISyntaxException {
URI uri;
uri = new URI(location);
if (!uri.isAbsolute()) {
HttpHost host = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
uri = new URI(host.toURI() + location);
}
return uri;
}
private boolean isRedirect(HttpResponse response) {
int code = response.getStatusLine().getStatusCode();
return (code == 301 || code == 302 || code == 303 || code == 307)
&& response.containsHeader("location");
}
class EntityWithEncoding {
private final String charSet;
private final byte[] content;
EntityWithEncoding(HttpEntity entity) throws IOException {
try {
if (entity != null) {
content = EntityUtils.toByteArray(entity);
Charset entityCharset = ContentType.getOrDefault(entity).getCharset();
charSet = entityCharset != null ? entityCharset.name() : null;
} else {
content = new byte[0];
charSet = null;
}
} finally {
EntityUtils.consume(entity);
}
}
public String getContentString()
throws UnsupportedEncodingException {
return new String(content, charSet != null ? charSet : "utf-8");
}
public byte[] getContent() {
return content;
}
public boolean hasEntityContent() {
return content != null;
}
}
private Response createResponse(HttpResponse httpResponse, HttpContext context,
EntityWithEncoding entityWithEncoding) throws IOException {
final Response response;
Header header = httpResponse.getFirstHeader("Content-Type");
if (header != null && header.getValue().startsWith("application/json")) {
String responseAsText = entityWithEncoding.getContentString();
try {
response = new JsonToBeanConverter().convert(Response.class, responseAsText);
} catch (ClassCastException e) {
if (responseAsText != null && "".equals(responseAsText)) {
// The remote server has died, but has already set some headers.
// Normally this occurs when the final window of the firefox driver
// is closed on OS X. Return null, as the return value _should_ be
// being ignored. This is not an elegant solution.
return null;
}
throw new WebDriverException("Cannot convert text to response: " + responseAsText, e);
}
} else {
response = new Response();
if (header != null && header.getValue().startsWith("image/png")) {
response.setValue(entityWithEncoding.getContent());
} else if (entityWithEncoding.hasEntityContent()) {
response.setValue(entityWithEncoding.getContentString());
}
HttpHost finalHost = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
String uri = finalHost.toURI();
String sessionId = HttpSessionId.getSessionId(uri);
if (sessionId != null) {
response.setSessionId(sessionId);
}
int statusCode = httpResponse.getStatusLine().getStatusCode();
if (!(statusCode > 199 && statusCode < 300)) {
// 4xx represents an unknown command or a bad request.
if (statusCode > 399 && statusCode < 500) {
response.setStatus(ErrorCodes.UNKNOWN_COMMAND);
} else if (statusCode > 499 && statusCode < 600) {
// 5xx represents an internal server error. The response status should already be set, but
// if not, set it to a general error code.
if (response.getStatus() == ErrorCodes.SUCCESS) {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
} else {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
}
if (response.getValue() instanceof String) {
// We normalise to \n because Java will translate this to \r\n
// if this is suitable on our platform, and if we have \r\n, java will
// turn this into \r\r\n, which would be Bad!
response.setValue(((String) response.getValue()).replace("\r\n", "\n"));
}
}
response.setState(errorCodes.toState(response.getStatus()));
return response;
}
private static CommandInfo get(String url) {
return new CommandInfo(url, HttpVerb.GET);
}
private static CommandInfo post(String url) {
return new CommandInfo(url, HttpVerb.POST);
}
private static CommandInfo delete(String url) {
return new CommandInfo(url, HttpVerb.DELETE);
}
}
| 1 | 10,691 | It would be better to just change RWD to throw `IllegalStateException` if you attempt to execute a command after quit (unless it's a second call to `quit()`) | SeleniumHQ-selenium | py |
@@ -106,11 +106,11 @@ func TestServerRoutesWithAuthAndBCrypt(t *testing.T) {
srvB := RunServer(optsB)
defer srvB.Shutdown()
- urlA := fmt.Sprintf("nats://%s:%d/", optsA.Host, optsA.Port)
- urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
+ urlA := fmt.Sprintf("nats://%s:%s@%s:%d/", optsA.Username, optsA.Password, optsA.Host, optsA.Port)
+ urlB := fmt.Sprintf("nats://%s:%s@%s:%d/", optsB.Username, optsB.Password, optsB.Host, optsB.Port)
// Wait for route to form.
- time.Sleep(250 * time.Millisecond)
+ time.Sleep(4 * time.Second)
nc1, err := nats.Connect(urlA)
if err != nil { | 1 | // Copyright 2013-2016 Apcera Inc. All rights reserved.
package server
import (
"fmt"
"net"
"net/url"
"reflect"
"strconv"
"testing"
"time"
"github.com/nats-io/go-nats"
)
func TestRouteConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/cluster.conf")
if err != nil {
t.Fatalf("Received an error reading route config file: %v\n", err)
}
golden := &Options{
Host: "localhost",
Port: 4242,
Username: "derek",
Password: "bella",
AuthTimeout: 1.0,
Cluster: ClusterOpts{
Host: "127.0.0.1",
Port: 4244,
Username: "route_user",
Password: "top_secret",
AuthTimeout: 1.0,
NoAdvertise: true,
ConnectRetries: 2,
},
LogFile: "/tmp/nats_cluster_test.log",
PidFile: "/tmp/nats_cluster_test.pid",
}
// Setup URLs
r1, _ := url.Parse("nats-route://foo:bar@localhost:4245")
r2, _ := url.Parse("nats-route://foo:bar@localhost:4246")
golden.Routes = []*url.URL{r1, r2}
if !reflect.DeepEqual(golden, opts) {
t.Fatalf("Options are incorrect.\nexpected: %+v\ngot: %+v",
golden, opts)
}
}
func TestServerRoutesWithClients(t *testing.T) {
optsA, _ := ProcessConfigFile("./configs/srv_a.conf")
optsB, _ := ProcessConfigFile("./configs/srv_b.conf")
optsA.NoSigs, optsA.NoLog = true, true
optsB.NoSigs, optsB.NoLog = true, true
srvA := RunServer(optsA)
defer srvA.Shutdown()
urlA := fmt.Sprintf("nats://%s:%d/", optsA.Host, optsA.Port)
urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
nc1, err := nats.Connect(urlA)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc1.Close()
ch := make(chan bool)
sub, _ := nc1.Subscribe("foo", func(m *nats.Msg) { ch <- true })
nc1.QueueSubscribe("foo", "bar", func(m *nats.Msg) {})
nc1.Publish("foo", []byte("Hello"))
// Wait for message
<-ch
sub.Unsubscribe()
srvB := RunServer(optsB)
defer srvB.Shutdown()
// Wait for route to form.
time.Sleep(250 * time.Millisecond)
nc2, err := nats.Connect(urlB)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc2.Close()
nc2.Publish("foo", []byte("Hello"))
nc2.Flush()
}
func TestServerRoutesWithAuthAndBCrypt(t *testing.T) {
optsA, _ := ProcessConfigFile("./configs/srv_a_bcrypt.conf")
optsB, _ := ProcessConfigFile("./configs/srv_b_bcrypt.conf")
optsA.NoSigs, optsA.NoLog = true, true
optsB.NoSigs, optsB.NoLog = true, true
srvA := RunServer(optsA)
defer srvA.Shutdown()
srvB := RunServer(optsB)
defer srvB.Shutdown()
urlA := fmt.Sprintf("nats://%s:%d/", optsA.Host, optsA.Port)
urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
// Wait for route to form.
time.Sleep(250 * time.Millisecond)
nc1, err := nats.Connect(urlA)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc1.Close()
// Test that we are connected.
ch := make(chan bool)
sub, _ := nc1.Subscribe("foo", func(m *nats.Msg) { ch <- true })
nc1.Flush()
defer sub.Unsubscribe()
nc2, err := nats.Connect(urlB)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc2.Close()
nc2.Publish("foo", []byte("Hello"))
// Wait for message
select {
case <-ch:
case <-time.After(2 * time.Second):
t.Fatal("Timeout waiting for message across route")
}
}
// Helper function to check that a cluster is formed
func checkClusterFormed(t *testing.T, servers ...*Server) {
// Wait for the cluster to form
var err string
expectedNumRoutes := len(servers) - 1
maxTime := time.Now().Add(5 * time.Second)
for time.Now().Before(maxTime) {
err = ""
for _, s := range servers {
if numRoutes := s.NumRoutes(); numRoutes != expectedNumRoutes {
err = fmt.Sprintf("Expected %d routes for server %q, got %d", expectedNumRoutes, s.ID(), numRoutes)
break
}
}
if err != "" {
time.Sleep(100 * time.Millisecond)
} else {
break
}
}
if err != "" {
t.Fatalf("%s", err)
}
}
// Helper function to generate next opts to make sure no port conflicts etc.
func nextServerOpts(opts *Options) *Options {
nopts := *opts
nopts.Port++
nopts.Cluster.Port++
nopts.HTTPPort++
return &nopts
}
func TestSeedSolicitWorks(t *testing.T) {
optsSeed, _ := ProcessConfigFile("./configs/seed.conf")
optsSeed.NoSigs, optsSeed.NoLog = true, true
srvSeed := RunServer(optsSeed)
defer srvSeed.Shutdown()
optsA := nextServerOpts(optsSeed)
optsA.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsSeed.Cluster.Host, optsSeed.Cluster.Port))
srvA := RunServer(optsA)
defer srvA.Shutdown()
urlA := fmt.Sprintf("nats://%s:%d/", optsA.Host, optsA.Port)
nc1, err := nats.Connect(urlA)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc1.Close()
// Test that we are connected.
ch := make(chan bool)
nc1.Subscribe("foo", func(m *nats.Msg) { ch <- true })
nc1.Flush()
optsB := nextServerOpts(optsA)
optsB.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsSeed.Cluster.Host, optsSeed.Cluster.Port))
srvB := RunServer(optsB)
defer srvB.Shutdown()
urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
nc2, err := nats.Connect(urlB)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc2.Close()
checkClusterFormed(t, srvSeed, srvA, srvB)
nc2.Publish("foo", []byte("Hello"))
// Wait for message
select {
case <-ch:
case <-time.After(2 * time.Second):
t.Fatal("Timeout waiting for message across route")
}
}
func TestTLSSeedSolicitWorks(t *testing.T) {
optsSeed, _ := ProcessConfigFile("./configs/seed_tls.conf")
optsSeed.NoSigs, optsSeed.NoLog = true, true
srvSeed := RunServer(optsSeed)
defer srvSeed.Shutdown()
optsA := nextServerOpts(optsSeed)
optsA.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsSeed.Cluster.Host, optsSeed.Cluster.Port))
srvA := RunServer(optsA)
defer srvA.Shutdown()
urlA := fmt.Sprintf("nats://%s:%d/", optsA.Host, optsA.Port)
nc1, err := nats.Connect(urlA)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc1.Close()
// Test that we are connected.
ch := make(chan bool)
nc1.Subscribe("foo", func(m *nats.Msg) { ch <- true })
nc1.Flush()
optsB := nextServerOpts(optsA)
optsB.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsSeed.Cluster.Host, optsSeed.Cluster.Port))
srvB := RunServer(optsB)
defer srvB.Shutdown()
urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
nc2, err := nats.Connect(urlB)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc2.Close()
checkClusterFormed(t, srvSeed, srvA, srvB)
nc2.Publish("foo", []byte("Hello"))
// Wait for message
select {
case <-ch:
case <-time.After(2 * time.Second):
t.Fatal("Timeout waiting for message across route")
}
}
func TestChainedSolicitWorks(t *testing.T) {
optsSeed, _ := ProcessConfigFile("./configs/seed.conf")
optsSeed.NoSigs, optsSeed.NoLog = true, true
srvSeed := RunServer(optsSeed)
defer srvSeed.Shutdown()
optsA := nextServerOpts(optsSeed)
optsA.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsSeed.Cluster.Host, optsSeed.Cluster.Port))
srvA := RunServer(optsA)
defer srvA.Shutdown()
urlSeed := fmt.Sprintf("nats://%s:%d/", optsSeed.Host, optsSeed.Port)
nc1, err := nats.Connect(urlSeed)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc1.Close()
// Test that we are connected.
ch := make(chan bool)
nc1.Subscribe("foo", func(m *nats.Msg) { ch <- true })
nc1.Flush()
optsB := nextServerOpts(optsA)
// Server B connects to A
optsB.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsA.Cluster.Host, optsA.Cluster.Port))
srvB := RunServer(optsB)
defer srvB.Shutdown()
urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
nc2, err := nats.Connect(urlB)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc2.Close()
checkClusterFormed(t, srvSeed, srvA, srvB)
nc2.Publish("foo", []byte("Hello"))
// Wait for message
select {
case <-ch:
case <-time.After(2 * time.Second):
t.Fatal("Timeout waiting for message across route")
}
}
func TestTLSChainedSolicitWorks(t *testing.T) {
optsSeed, _ := ProcessConfigFile("./configs/seed_tls.conf")
optsSeed.NoSigs, optsSeed.NoLog = true, true
srvSeed := RunServer(optsSeed)
defer srvSeed.Shutdown()
optsA := nextServerOpts(optsSeed)
optsA.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsSeed.Cluster.Host, optsSeed.Cluster.Port))
srvA := RunServer(optsA)
defer srvA.Shutdown()
urlSeed := fmt.Sprintf("nats://%s:%d/", optsSeed.Host, optsSeed.Port)
nc1, err := nats.Connect(urlSeed)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc1.Close()
// Test that we are connected.
ch := make(chan bool)
nc1.Subscribe("foo", func(m *nats.Msg) { ch <- true })
nc1.Flush()
optsB := nextServerOpts(optsA)
// Server B connects to A
optsB.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsA.Cluster.Host, optsA.Cluster.Port))
srvB := RunServer(optsB)
defer srvB.Shutdown()
urlB := fmt.Sprintf("nats://%s:%d/", optsB.Host, optsB.Port)
nc2, err := nats.Connect(urlB)
if err != nil {
t.Fatalf("Error creating client: %v\n", err)
}
defer nc2.Close()
checkClusterFormed(t, srvSeed, srvA, srvB)
nc2.Publish("foo", []byte("Hello"))
// Wait for message
select {
case <-ch:
case <-time.After(2 * time.Second):
t.Fatal("Timeout waiting for message across route")
}
}
func TestRouteTLSHandshakeError(t *testing.T) {
optsSeed, _ := ProcessConfigFile("./configs/seed_tls.conf")
srvSeed := RunServer(optsSeed)
defer srvSeed.Shutdown()
opts := DefaultOptions
opts.Routes = RoutesFromStr(fmt.Sprintf("nats://%s:%d", optsSeed.Cluster.Host, optsSeed.Cluster.Port))
srv := RunServer(&opts)
defer srv.Shutdown()
time.Sleep(500 * time.Millisecond)
maxTime := time.Now().Add(1 * time.Second)
for time.Now().Before(maxTime) {
if srv.NumRoutes() > 0 {
time.Sleep(100 * time.Millisecond)
continue
}
break
}
if srv.NumRoutes() > 0 {
t.Fatal("Route should have failed")
}
}
func TestBlockedShutdownOnRouteAcceptLoopFailure(t *testing.T) {
opts := DefaultOptions
opts.Cluster.Host = "x.x.x.x"
opts.Cluster.Port = 7222
s := New(&opts)
go s.Start()
// Wait a second
time.Sleep(time.Second)
ch := make(chan bool)
go func() {
s.Shutdown()
ch <- true
}()
timeout := time.NewTimer(5 * time.Second)
select {
case <-ch:
return
case <-timeout.C:
t.Fatal("Shutdown did not complete")
}
}
func TestRouteUseIPv6(t *testing.T) {
opts := DefaultOptions
opts.Cluster.Host = "::"
opts.Cluster.Port = 6222
// I believe that there is no IPv6 support on Travis...
// Regardless, cannot have this test fail simply because IPv6 is disabled
// on the host.
hp := net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(opts.Cluster.Port))
_, err := net.ResolveTCPAddr("tcp", hp)
if err != nil {
t.Skipf("Skipping this test since there is no IPv6 support on this host: %v", err)
}
s := RunServer(&opts)
defer s.Shutdown()
routeUp := false
timeout := time.Now().Add(5 * time.Second)
for time.Now().Before(timeout) && !routeUp {
// We know that the server is local and listening to
// all IPv6 interfaces. Try connect using IPv6 loopback.
if conn, err := net.Dial("tcp", "[::1]:6222"); err != nil {
// Travis seem to have the server actually listening to 0.0.0.0,
// so try with 127.0.0.1
if conn, err := net.Dial("tcp", "127.0.0.1:6222"); err != nil {
time.Sleep(time.Second)
continue
} else {
conn.Close()
}
} else {
conn.Close()
}
routeUp = true
}
if !routeUp {
t.Fatal("Server failed to start route accept loop")
}
}
func TestClientConnectToRoutePort(t *testing.T) {
opts := DefaultOptions
// Since client will first connect to the route listen port, set the
// cluster's Host to localhost so it works on Windows too, since on
// Windows, a client can't use 0.0.0.0 in a connect.
opts.Cluster.Host = "localhost"
opts.Cluster.NoAdvertise = true
s := RunServer(&opts)
defer s.Shutdown()
url := fmt.Sprintf("nats://%s:%d", opts.Cluster.Host, opts.Cluster.Port)
clientURL := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
// When connecting to the ROUTE port, the client library will receive the
// CLIENT port in the INFO protocol. This URL is added to the client's pool
// and will be tried after the initial connect failure. So all those
// nats.Connect() should succeed.
// The only reason for a failure would be if there are too many FDs in time-wait
// which would delay the creation of TCP connection. So keep the total of
// attempts rather small.
total := 10
for i := 0; i < total; i++ {
nc, err := nats.Connect(url)
if err != nil {
t.Fatalf("Unexepected error on connect: %v", err)
}
defer nc.Close()
if nc.ConnectedUrl() != clientURL {
t.Fatalf("Expected client to be connected to %v, got %v", clientURL, nc.ConnectedUrl())
}
}
}
| 1 | 6,888 | You may want to have a look at `func checkClusterFormed (t *testing.T, servers ...*Server)` in this file that I created some time ago. It will time out at 5 seconds, but will return as soon as the cluster is ready. | nats-io-nats-server | go |
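
The comment above is the usual advice to replace a fixed sleep with a bounded poll that returns as soon as the condition holds. Below is a minimal, self-contained Go sketch of that pattern; `waitFor` and its condition are illustrative stand-ins, not the project's helper. In the test itself, the suggestion amounts to calling the existing `checkClusterFormed` after both servers are started, in place of the fixed sleep.

```go
package main

import (
	"fmt"
	"time"
)

// waitFor polls cond until it returns true or timeout elapses. This is the
// same idea as checkClusterFormed: return as soon as every server reports
// the expected number of routes instead of always sleeping the worst case.
func waitFor(timeout, interval time.Duration, cond func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return cond()
}

func main() {
	start := time.Now()
	// Stand-in condition; the real test would check s.NumRoutes() on every
	// server against the expected route count.
	formed := waitFor(5*time.Second, 100*time.Millisecond, func() bool {
		return time.Since(start) > 300*time.Millisecond
	})
	fmt.Println("cluster formed:", formed, "after", time.Since(start))
}
```
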
@@ -662,8 +662,14 @@ static void push_cb (flux_t *h, flux_msg_handler_t *mh,
aggregate_sink (h, ag);
rc = 0;
done:
- if (flux_respond (h, msg, rc < 0 ? saved_errno : 0, NULL) < 0)
- flux_log_error (h, "aggregator.push: flux_respond");
+ if (rc < 0) {
+ if (flux_respond_error (h, msg, saved_errno, NULL) < 0)
+ flux_log_error (h, "aggregator.push: flux_respond_error");
+ }
+ else {
+ if (flux_respond (h, msg, NULL) < 0)
+ flux_log_error (h, "aggregator.push: flux_respond");
+ }
}
| 1 | /************************************************************\
* Copyright 2016 Lawrence Livermore National Security, LLC
* (c.f. AUTHORS, NOTICE.LLNS, COPYING)
*
* This file is part of the Flux resource manager framework.
* For details, see https://github.com/flux-framework.
*
* SPDX-License-Identifier: LGPL-3.0
\************************************************************/
/* aggregator.c - reduction based numerical aggreagator */
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <flux/core.h>
#include <czmq.h>
#include <jansson.h>
#include "src/common/libidset/idset.h"
struct aggregator {
flux_t *h;
uint32_t rank;
double default_timeout;
double timer_scale;
zhash_t *aggregates;
};
/*
* Single entry in an aggregate: a list of ids with a common value.
*/
struct aggregate_entry {
struct idset *ids;
json_t *value;
};
/*
* Representation of an aggregate. A unique kvs key, along with a
* list of aggregate entries as above. Each aggregate tracks its
* summary stats, current count and expected total of entries.
*/
struct aggregate {
struct aggregator *ctx; /* Pointer back to containing aggregator */
flux_watcher_t *tw; /* timeout watcher */
double timeout; /* timeout */
int sink_retries; /* number of times left to try to sink to kvs */
uint32_t fwd_count; /* forward at this many */
char *key; /* KVS key into which to sink the aggregate */
uint32_t count; /* count of current total entries */
uint32_t total; /* expected total entries (used for sink) */
zlist_t *entries; /* list of individual entries */
json_t *summary; /* optional summary stats for this aggregate */
};
static void aggregate_entry_destroy (struct aggregate_entry *ae)
{
if (ae) {
int saved_errno = errno;
idset_destroy (ae->ids);
free (ae);
errno = saved_errno;
}
}
static struct aggregate_entry * aggregate_entry_create (void)
{
struct aggregate_entry *ae = calloc (1, sizeof (*ae));
if (!ae)
return (NULL);
if (!(ae->ids = idset_create (0, IDSET_FLAG_AUTOGROW))) {
aggregate_entry_destroy (ae);
return (NULL);
}
return (ae);
}
/* Search this aggregates entries for a value. Return entry if found
*/
static struct aggregate_entry *
aggregate_entry_find (struct aggregate *ag, json_t *value)
{
struct aggregate_entry *ae = zlist_first (ag->entries);
while (ae) {
if (json_equal (ae->value, value))
return (ae);
ae = zlist_next (ag->entries);
}
return (NULL);
}
static int summarize_real (struct aggregate *ag, json_t *value)
{
double v = json_real_value (value);
double min, max;
if (!ag->summary) {
ag->summary = json_pack ("{s:f,s:f}", "min", v, "max", v);
return ag->summary ? 0 : -1;
}
if (json_unpack (ag->summary, "{s:F,s:F}", "min", &min, "max", &max) < 0) {
flux_log (ag->ctx->h, LOG_ERR, "summarize_real: json_unpack failed");
return (-1);
}
if (((max < v) && (json_object_set (ag->summary, "max", value) < 0))
|| ((min > v) && (json_object_set (ag->summary, "min", value) < 0))) {
flux_log_error (ag->ctx->h, "summarize_real: json_object_set");
return (-1);
}
return (0);
}
static int summarize_int (struct aggregate *ag, json_t *value)
{
int64_t v = json_integer_value (value);
int64_t min, max;
if (!ag->summary) {
ag->summary = json_pack ("{s:I,s:I}", "min", v, "max", v);
return ag->summary ? 0 : -1;
}
if (json_unpack (ag->summary, "{s:I,s:I}", "min", &min, "max", &max) < 0) {
flux_log_error (ag->ctx->h, "summarize_int: json_unpack");
return (-1);
}
if (((max < v) && (json_object_set (ag->summary, "max", value) < 0))
|| ((min > v) && (json_object_set (ag->summary, "min", value) < 0))) {
flux_log_error (ag->ctx->h, "summarize_int: json_object_set");
return (-1);
}
return (0);
}
static int aggregate_update_summary (struct aggregate *ag, json_t *value)
{
switch (json_typeof (value)) {
case JSON_INTEGER:
return summarize_int (ag, value);
case JSON_REAL:
return summarize_real (ag, value);
case JSON_STRING:
case JSON_OBJECT:
case JSON_ARRAY:
case JSON_TRUE:
case JSON_FALSE:
case JSON_NULL:
/* Currently no summary stats for these types */
return (0);
}
return (0);
}
/* Add a new aggregate entry to this aggregate.
* Update summary stats if update == true.
*/
static struct aggregate_entry *
aggregate_entry_add (struct aggregate *ag, json_t *value)
{
struct aggregate_entry *ae = aggregate_entry_create ();
if (ae) {
json_incref (value);
ae->value = value;
/* Update aggregate summary statistics on rank 0 only */
if (ag->ctx->rank == 0 && aggregate_update_summary (ag, value) < 0)
flux_log_error (ag->ctx->h, "aggregate_update_summary");
zlist_push (ag->entries, ae);
}
return (ae);
}
int add_string_to_idset (struct idset *idset, const char *s)
{
struct idset *nids;
unsigned int id;
int rc = -1;
if (!(nids = idset_decode (s)))
return (-1);
id = idset_first (nids);
while (id != IDSET_INVALID_ID) {
if (idset_set (idset, id) < 0)
goto done;
id = idset_next (nids, id);
}
rc = 0;
done:
idset_destroy (nids);
return rc;
}
/* Push a new (ids, value) pair onto aggregate `ag`.
* If an existing matching entry is found, add ids to its nodeset.
* o/w, add a new entry. In either case update current count with
* the number of `ids` added.
*/
static int aggregate_push (struct aggregate *ag, json_t *value, const char *ids)
{
int count;
struct aggregate_entry *ae = aggregate_entry_find (ag, value);
if ((ae == NULL) && !(ae = aggregate_entry_add (ag, value)))
return (-1);
count = idset_count (ae->ids);
if (add_string_to_idset (ae->ids, ids) < 0)
return (-1);
/* Update count */
ag->count += (idset_count (ae->ids) - count);
return (0);
}
/* Push JSON object of aggregate entries onto aggregate `ag`
*/
static int aggregate_push_json (struct aggregate *ag,
json_t *entries)
{
const char *ids;
json_t *val;
json_object_foreach (entries, ids, val) {
if (aggregate_push (ag, val, ids) < 0) {
flux_log_error (ag->ctx->h, "aggregate_push failed");
return (-1);
}
}
return (0);
}
static int set_json_object_new_idset_key (json_t *o, struct idset *key,
json_t *value)
{
char *s;
int rc;
if (!(s = idset_encode (key, IDSET_FLAG_RANGE | IDSET_FLAG_BRACKETS)))
return (-1);
rc = json_object_set_new (o, s, value);
free (s);
return (rc);
}
/* Return json object containing all "entries" from the current
* aggregate object `ag`
*/
static json_t *aggregate_entries_tojson (struct aggregate *ag)
{
struct aggregate_entry *ae;
json_t *entries = NULL;
if (!(entries = json_object ()))
return NULL;
ae = zlist_first (ag->entries);
while (ae) {
if (set_json_object_new_idset_key (entries, ae->ids, ae->value) < 0)
goto error;
ae = zlist_next (ag->entries);
}
return (entries);
error:
json_decref (entries);
return (NULL);
}
static void forward_continuation (flux_future_t *f, void *arg)
{
flux_t *h = flux_future_get_flux (f);
struct aggregate *ag = arg;
if (flux_rpc_get (f, NULL) < 0)
flux_log_error (h, "aggregator.push: key=%s", ag->key);
flux_future_destroy (f);
}
/*
* Forward aggregate `ag` upstream
*/
static int aggregate_forward (flux_t *h, struct aggregate *ag)
{
int rc = 0;
flux_future_t *f;
json_t *o = aggregate_entries_tojson (ag);
if (o == NULL) {
flux_log (h, LOG_ERR, "forward: aggregate_entries_tojson failed");
return (-1);
}
flux_log (h, LOG_DEBUG, "forward: %s: count=%d total=%d",
ag->key, ag->count, ag->total);
if (!(f = flux_rpc_pack (h, "aggregator.push", FLUX_NODEID_UPSTREAM, 0,
"{s:s,s:i,s:i,s:f,s:o}",
"key", ag->key,
"count", ag->count,
"total", ag->total,
"timeout", ag->timeout,
"entries", o)) ||
(flux_future_then (f, -1., forward_continuation, (void *) ag) < 0)) {
flux_log_error (h, "flux_rpc: aggregator.push");
flux_future_destroy (f);
rc = -1;
}
return (rc);
}
static void aggregate_sink_abort (flux_t *h, struct aggregate *ag)
{
flux_msg_t *msg = NULL;
char *topic = NULL;
flux_log (h, LOG_ERR, "sink: aborting aggregate %s\n", ag->key);
if ((asprintf (&topic, "aggregator.abort.%s", ag->key)) < 0) {
flux_log_error (h, "sink_abort: asprintf");
goto out;
}
if ((msg = flux_event_encode (topic, "{ }")) == NULL) {
flux_log_error (h, "flux_event_encode");
goto out;
}
if (flux_send (h, msg, 0) < 0)
flux_log_error (h, "flux_event_encode");
out:
free (topic);
flux_msg_destroy (msg);
}
static void aggregate_sink (flux_t *h, struct aggregate *ag);
static void aggregate_sink_again (flux_reactor_t *r, flux_watcher_t *w,
int revents, void *arg)
{
struct aggregate *ag = arg;
aggregate_sink (ag->ctx->h, ag);
flux_watcher_destroy (w);
}
static int sink_retry (flux_t *h, struct aggregate *ag)
{
flux_watcher_t *w;
double t = ag->timeout;
if (t <= 1e-3)
t = .250;
/* Return with error if we're out of retries */
if (--ag->sink_retries <= 0)
return (-1);
flux_log (h, LOG_DEBUG, "sink: %s: retry in %.3fs", ag->key, t);
w = flux_timer_watcher_create (flux_get_reactor (h),
t, 0.,
aggregate_sink_again,
(void *) ag);
if (w == NULL) {
flux_log_error (h, "sink_retry: flux_timer_watcher_create");
return (-1);
}
flux_watcher_start (w);
return (0);
}
static void sink_continuation (flux_future_t *f, void *arg)
{
flux_t *h = flux_future_get_flux (f);
struct aggregate *ag = arg;
int rc = flux_future_get (f, NULL);
flux_future_destroy (f);
if (rc < 0) {
/* Schedule a retry, if succesful return immediately, otherwise
* abort the current aggregate and remove it.
*/
if (sink_retry (h, ag) == 0)
return;
aggregate_sink_abort (h, ag);
}
zhash_delete (ag->ctx->aggregates, ag->key);
return;
}
static char *aggregate_to_string (struct aggregate *ag)
{
char *s = NULL;
const char *name;
json_t *val, *o;
json_t *entries = aggregate_entries_tojson (ag);
if (entries == NULL)
return (NULL);
o = json_pack ("{s:i,s:i,s:o}",
"total", ag->total,
"count", ag->count,
"entries", entries);
if (o == NULL)
return (NULL);
/* Encode summary stats at top level of json representation
* for backwards compatibility
*/
if (ag->summary) {
json_object_foreach (ag->summary, name, val)
json_object_set (o, name, val);
}
s = json_dumps (o, JSON_COMPACT);
json_decref (o);
return (s);
}
static void aggregate_sink (flux_t *h, struct aggregate *ag)
{
int rc = -1;
char *agstr = NULL;
flux_kvs_txn_t *txn = NULL;
flux_future_t *f = NULL;
flux_log (h, LOG_DEBUG, "sink: %s: count=%d total=%d",
ag->key, ag->count, ag->total);
/* Fail on key == "." */
if (strcmp (ag->key, ".") == 0) {
flux_log (h, LOG_ERR, "sink: refusing to sink to rootdir");
goto out;
}
if (!(agstr = aggregate_to_string (ag))) {
flux_log (h, LOG_ERR, "sink: aggregate_to_string failed");
goto out;
}
if (!(txn = flux_kvs_txn_create ())) {
flux_log_error (h, "sink: flux_kvs_txn_create");
goto out;
}
if (flux_kvs_txn_put (txn, 0, ag->key, agstr) < 0) {
flux_log_error (h, "sink: flux_kvs_txn_put");
goto out;
}
if (!(f = flux_kvs_commit (h, NULL, 0, txn))
|| flux_future_then (f, -1., sink_continuation, (void *)ag) < 0) {
flux_log_error (h, "sink: flux_kvs_commit");
flux_future_destroy (f);
goto out;
}
rc = 0;
out:
flux_kvs_txn_destroy (txn);
free (agstr);
if ((rc < 0) && (sink_retry (h, ag) < 0)) {
aggregate_sink_abort (h, ag);
zhash_delete (ag->ctx->aggregates, ag->key);
}
}
/*
* Flush aggregate `ag` -- forward entry upstream and destroy it locally.
*/
static int aggregate_flush (struct aggregate *ag)
{
flux_t *h = ag->ctx->h;
int rc;
assert (ag->ctx->rank != 0);
rc = aggregate_forward (h, ag);
zhash_delete (ag->ctx->aggregates, ag->key);
return (rc);
}
static void aggregate_destroy (struct aggregate *ag)
{
struct aggregate_entry *ae = zlist_first (ag->entries);
while (ae) {
aggregate_entry_destroy (ae);
ae = zlist_next (ag->entries);
}
zlist_destroy (&ag->entries);
json_decref (ag->summary);
flux_watcher_destroy (ag->tw);
free (ag->key);
free (ag);
}
static void timer_cb (flux_reactor_t *r, flux_watcher_t *tw,
int revents, void *arg)
{
struct aggregate *ag = arg;
flux_t *h = ag->ctx->h;
if (aggregate_flush (ag) < 0)
flux_log_error (h, "aggregate_flush");
}
static void aggregate_timer_start (struct aggregate *ag,
double timeout)
{
assert (ag && ag->ctx && ag->ctx->h);
struct aggregator *ctx = ag->ctx;
if (ctx->rank != 0) {
flux_t *h = ctx->h;
flux_reactor_t *r = flux_get_reactor (h);
ag->tw = flux_timer_watcher_create (r, timeout, 0.,
timer_cb, (void *) ag);
if (ag->tw == NULL) {
flux_log_error (h, "flux_timer_watcher_create");
return;
}
flux_watcher_start (ag->tw);
}
}
static struct aggregate *
aggregate_create (struct aggregator *ctx, const char *key)
{
flux_t *h = ctx->h;
struct aggregate *ag = calloc (1, sizeof (*ag));
if (ag == NULL)
return NULL;
ag->ctx = ctx;
if (!(ag->key = strdup (key)) || !(ag->entries = zlist_new ())) {
flux_log_error (h, "aggregate_create: memory allocation error");
aggregate_destroy (ag);
return (NULL);
}
ag->sink_retries = 2;
return (ag);
}
static void aggregator_destroy (struct aggregator *ctx)
{
if (ctx) {
zhash_destroy (&ctx->aggregates);
free (ctx);
}
}
static int attr_get_int (flux_t *h, const char *attr)
{
unsigned long l;
char *p;
const char *s = flux_attr_get (h, attr);
if (!s)
return (-1);
errno = 0;
l = strtoul (s, &p, 10);
if (*p != '\0' || errno != 0) {
flux_log_error (h, "flux_attr_get (%s) = %s", attr, s);
return (-1);
}
return (l);
}
static double timer_scale (flux_t *h)
{
long level, maxlevel;
if (((level = attr_get_int (h, "tbon.level")) < 0) ||
((maxlevel = attr_get_int (h, "tbon.maxlevel")) < 0)) {
return (1.);
}
return (maxlevel - level + 1.);
}
static struct aggregator * aggregator_create (flux_t *h)
{
struct aggregator * ctx = calloc (1, sizeof (*ctx));
if (ctx == NULL)
return (NULL);
ctx->h = h;
if (flux_get_rank (h, &ctx->rank) < 0) {
flux_log_error (h, "flux_get_rank");
goto error;
}
ctx->default_timeout = 0.01;
ctx->timer_scale = timer_scale (h);
if (!(ctx->aggregates = zhash_new ())) {
flux_log_error (h, "zhash_new");
goto error;
}
return (ctx);
error:
aggregator_destroy (ctx);
return (NULL);
}
/*
* Add a new aggregate to aggregator `ctx`. Insert into entries
* hash, start the aggregate timeout, scaled by the current
* aggregator timeout scale.
*/
static struct aggregate *
aggregator_new_aggregate (struct aggregator *ctx, const char *key,
int64_t total,
double timeout)
{
struct aggregate *ag = aggregate_create (ctx, key);
if (ag == NULL)
return (NULL);
if (zhash_insert (ctx->aggregates, key, ag) < 0) {
aggregate_destroy (ag);
return (NULL);
}
zhash_freefn (ctx->aggregates, key, (zhash_free_fn *) aggregate_destroy);
ag->timeout = timeout;
ag->total = total;
aggregate_timer_start (ag, timeout * ctx->timer_scale);
return (ag);
}
/*
* Callback for "aggregator.push"
*/
static void push_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg)
{
int rc = -1;
struct aggregator *ctx = arg;
struct aggregate *ag = NULL;
const char *key;
double timeout = ctx->default_timeout;
int64_t fwd_count = 0;
int64_t total = 0;
json_t *entries = NULL;
int saved_errno = 0;
if (flux_msg_unpack (msg, "{s:s,s:I,s:o,s?F,s?I}",
"key", &key,
"total", &total,
"entries", &entries,
"timeout", &timeout,
"fwd_count", &fwd_count) < 0) {
saved_errno = EPROTO;
goto done;
}
if (!(ag = zhash_lookup (ctx->aggregates, key)) &&
!(ag = aggregator_new_aggregate (ctx, key, total, timeout))) {
flux_log_error (ctx->h, "failed to get new aggregate");
saved_errno = errno;
goto done;
}
if (fwd_count > 0)
ag->fwd_count = fwd_count;
if ((rc = aggregate_push_json (ag, entries)) < 0) {
flux_log_error (h, "aggregate_push_json: failed");
goto done;
}
flux_log (ctx->h, LOG_DEBUG, "push: %s: count=%d fwd_count=%d total=%d",
ag->key, ag->count, ag->fwd_count, ag->total);
if (ctx->rank > 0) {
if ((ag->count == ag->total
|| ag->count == ag->fwd_count
|| timeout == 0.)
&& (rc = aggregate_flush (ag)))
goto done;
}
else if (ag->count == ag->total)
aggregate_sink (h, ag);
rc = 0;
done:
if (flux_respond (h, msg, rc < 0 ? saved_errno : 0, NULL) < 0)
flux_log_error (h, "aggregator.push: flux_respond");
}
static const struct flux_msg_handler_spec htab[] = {
//{ FLUX_MSGTYPE_EVENT, "hb", hb_cb, 0 },
{ FLUX_MSGTYPE_REQUEST, "aggregator.push", push_cb, 0 },
FLUX_MSGHANDLER_TABLE_END,
};
int mod_main (flux_t *h, int argc, char **argv)
{
int rc = -1;
flux_msg_handler_t **handlers = NULL;
struct aggregator *ctx = aggregator_create (h);
if (!ctx)
goto done;
if (flux_msg_handler_addvec (h, htab, ctx, &handlers) < 0) {
flux_log_error (h, "flux_msg_handler_advec");
goto done;
}
if (flux_reactor_run (flux_get_reactor (h), 0) < 0) {
flux_log_error (h, "flux_reactor_run");
goto done;
}
rc = 0;
done:
flux_msg_handler_delvec (handlers);
aggregator_destroy (ctx);
return rc;
}
MOD_NAME ("aggregator");
/*
* vi:tabstop=4 shiftwidth=4 expandtab
*/
| 1 | 23,214 | I see a few places where this blob of code is required due to the removal of the `errnum` parameter from `flux_respond` -- trading 2 lines of code for 8. The improvement to the function seems like a good idea, but I wonder if we need a convenience macro or function to do it the old way? You went through and made all the changes, was it overall a net win? | flux-framework-flux-core | c |
@@ -23,9 +23,9 @@
*/
import {
appendNotificationsCount,
- sendAnalyticsTrackingEvent,
getQueryParameter,
} from './util/standalone';
+import { trackEvent } from './util/tracking';
// Set webpackPublicPath on-the-fly.
if ( global.googlesitekitAdminbar && global.googlesitekitAdminbar.publicPath ) { | 1 | /**
* Admin bar loader.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint camelcase:[0] */
/**
* Internal dependencies
*/
import {
appendNotificationsCount,
sendAnalyticsTrackingEvent,
getQueryParameter,
} from './util/standalone';
// Set webpackPublicPath on-the-fly.
if ( global.googlesitekitAdminbar && global.googlesitekitAdminbar.publicPath ) {
// eslint-disable-next-line no-undef
__webpack_public_path__ = global.googlesitekitAdminbar.publicPath;
}
// Is adminbar scripts loaded?
let isAdminbarLoaded = false;
// Load adminbar script dynamically.
function loadAdminbarScripts() {
return import(
/* webpackChunkName: "chunk-googlesitekit-adminbar" */
'./googlesitekit-adminbar'
).then( ( GoogleSitekitAdminbar ) => {
return GoogleSitekitAdminbar;
} ).catch( () => {
return new Error( 'Site Kit: An error occurred while loading the Adminbar component files.' );
} );
}
function initAdminbar() {
loadAdminbarScripts().then( ( GoogleSitekitAdminbar ) => {
try {
// Initialize the adminbar.
GoogleSitekitAdminbar.init();
} catch ( error ) {
// eslint-disable-next-line no-console
console.error( 'Site Kit: An error occurred while loading the Adminbar components.' );
// Set adminbar to error-state.
document.getElementById( 'js-googlesitekit-adminbar' ).classList.add( 'googlesitekit-adminbar--has-error' );
}
// Remove the loading state.
document.getElementById( 'js-googlesitekit-adminbar' ).classList.remove( 'googlesitekit-adminbar--loading' );
} );
}
// Initialize the loader once the DOM is ready.
global.addEventListener( 'load', function() {
// Add event to Site Kit adminbar icon.
const adminbarIconTrigger = document.getElementById( 'wp-admin-bar-google-site-kit' );
let loadingGtag = false;
// Check if adminbarIconTrigger is an element.
if ( ! adminbarIconTrigger ) {
return;
}
// The total notifications count should always rely on local storage
// directly for external availability.
if ( ! global.localStorage ) {
return;
}
const count = global.localStorage.getItem( 'googlesitekit::total-notifications' ) || 0;
appendNotificationsCount( count );
const onViewAdminBarMenu = function() {
if ( isAdminbarLoaded ) {
return;
}
const { trackingID } = global._googlesitekitBase;
if ( ! trackingID ) {
return;
}
// Track the menu hover event.
if ( global.googlesitekitTrackingEnabled ) {
// Dynamically load the gtag script if not loaded.
if ( 'undefined' === typeof gtag && ! loadingGtag ) {
loadingGtag = true;
const gtagScript = document.createElement( 'script' );
gtagScript.type = 'text/javascript';
gtagScript.setAttribute( 'async', 'true' );
gtagScript.onload = function() {
global.gtag = function() {
global.dataLayer.push( arguments );
};
sendAnalyticsTrackingEvent( 'admin_bar', 'page_stats_view' );
};
gtagScript.setAttribute( 'src', `https://www.googletagmanager.com/gtag/js?id=${ trackingID }` );
document.head.appendChild( gtagScript );
} else {
sendAnalyticsTrackingEvent( 'admin_bar', 'page_stats_view' );
}
}
initAdminbar();
isAdminbarLoaded = true;
};
if ( 'true' === getQueryParameter( 'googlesitekit_adminbar_open' ) ) {
onViewAdminBarMenu();
adminbarIconTrigger.classList.add( 'hover' );
} else {
adminbarIconTrigger.addEventListener( 'mouseenter', onViewAdminBarMenu, false );
}
} );
| 1 | 26,125 | I know it's not related to your change, but it's a bit odd that this import uses a relative path rather than the ones set up by webpack. Might be nice to change this one in case we refactor those paths in the future; this one wouldn't get caught in any kind of find+replace. | google-site-kit-wp | js |
@@ -86,7 +86,8 @@ func (c *Config) Start() {
for _, schedule := range c.Scheduler.Schedules {
helper.ControllerWg.Add(1)
- go schedule.ExecuteSchedule(c.ActionManager.Actions, c.Converter.DataRead, c.Device.ID)
+ newSchedule := schedule
+ go newSchedule.ExecuteSchedule(c.ActionManager.Actions, c.Converter.DataRead, c.Device.ID)
}
helper.ControllerWg.Wait()
} | 1 | /*
Copyright 2019 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"encoding/json"
"strings"
MQTT "github.com/eclipse/paho.mqtt.golang"
"github.com/paypal/gatt"
"github.com/paypal/gatt/examples/option"
"k8s.io/klog"
"github.com/kubeedge/kubeedge/mappers/bluetooth_mapper/action_manager"
"github.com/kubeedge/kubeedge/mappers/bluetooth_mapper/configuration"
"github.com/kubeedge/kubeedge/mappers/bluetooth_mapper/data_converter"
"github.com/kubeedge/kubeedge/mappers/bluetooth_mapper/helper"
"github.com/kubeedge/kubeedge/mappers/bluetooth_mapper/scheduler"
"github.com/kubeedge/kubeedge/mappers/bluetooth_mapper/watcher"
)
// constants which can be used to convey topic information
const (
MapperTopicPrefix = "$ke/device/bluetooth-mapper/"
WatcherTopicSuffix = "/watcher/create"
SchedulerCreateTopicSuffix = "/scheduler/create"
SchedulerDeleteTopicSuffix = "/scheduler/delete"
ActionManagerCreateTopicSuffix = "/action-manager/create"
ActionManagerDeleteTopicSuffix = "/action-manager/delete"
)
var topicMap = make(map[string]MQTT.MessageHandler)
//Config contains the configuration used by the controller
type Config struct {
Mqtt configuration.Mqtt `yaml:"mqtt"`
Device configuration.Device `yaml:"device"`
Watcher watcher.Watcher `yaml:"watcher"`
Scheduler scheduler.Scheduler `yaml:"scheduler"`
ActionManager actionmanager.ActionManager `yaml:"action-manager"`
Converter dataconverter.Converter `yaml:"data-converter"`
}
// initTopicMap initializes topics to their respective handler functions
func (c *Config) initTopicMap() {
topicMap[MapperTopicPrefix+c.Device.ID+WatcherTopicSuffix] = c.handleWatchMessage
topicMap[MapperTopicPrefix+c.Device.ID+SchedulerCreateTopicSuffix] = c.handleScheduleCreateMessage
topicMap[MapperTopicPrefix+c.Device.ID+SchedulerDeleteTopicSuffix] = c.handleScheduleDeleteMessage
topicMap[MapperTopicPrefix+c.Device.ID+ActionManagerCreateTopicSuffix] = c.handleActionCreateMessage
topicMap[MapperTopicPrefix+c.Device.ID+ActionManagerDeleteTopicSuffix] = c.handleActionDeleteMessage
}
//Start starts the controller of the mapper
func (c *Config) Start() {
c.initTopicMap()
helper.MqttConnect(c.Mqtt.Mode, c.Mqtt.InternalServer, c.Mqtt.Server)
subscribeAllTopics()
helper.ControllerWg.Add(1)
device, err := gatt.NewDevice(option.DefaultClientOptions...)
if err != nil {
klog.Fatalf("Failed to open device, err: %s\n", err)
return
}
go c.Watcher.Initiate(device, c.Device.Name, c.Device.ID, c.ActionManager.Actions, c.Converter)
<-watcher.DeviceConnected
for _, action := range c.ActionManager.Actions {
if action.PerformImmediately {
action.PerformOperation(c.Converter.DataRead)
}
}
for _, schedule := range c.Scheduler.Schedules {
helper.ControllerWg.Add(1)
go schedule.ExecuteSchedule(c.ActionManager.Actions, c.Converter.DataRead, c.Device.ID)
}
helper.ControllerWg.Wait()
}
//subscribeAllTopics subscribes to mqtt topics associated with mapper
func subscribeAllTopics() {
for key, value := range topicMap {
helper.TokenClient = helper.Client.Subscribe(key, 0, value)
if helper.TokenClient.Wait() && helper.TokenClient.Error() != nil {
klog.Errorf("subscribe() Error in topic: %s is: %s", key, helper.TokenClient.Error())
}
}
}
//handleWatchMessage is the MQTT handler function for changing watcher configuration at runtime
func (c *Config) handleWatchMessage(client MQTT.Client, message MQTT.Message) {
newWatch := watcher.Watcher{}
err := json.Unmarshal(message.Payload(), &newWatch)
if err != nil {
klog.Errorf("Error in unmarshalling: %s", err)
}
c.Watcher = newWatch
configuration.Config.Watcher = c.Watcher
klog.Infof("New watcher has been started")
klog.Infof("New Watcher: %v", c.Watcher)
}
//handleScheduleCreateMessage is the MQTT handler function for adding schedules at runtime
func (c *Config) handleScheduleCreateMessage(client MQTT.Client, message MQTT.Message) {
newSchedules := []scheduler.Schedule{}
err := json.Unmarshal(message.Payload(), &newSchedules)
if err != nil {
klog.Errorf("Error in unmarshalling: %s", err)
}
for _, newSchedule := range newSchedules {
scheduleExists := false
for scheduleIndex, schedule := range c.Scheduler.Schedules {
if schedule.Name == newSchedule.Name {
c.Scheduler.Schedules[scheduleIndex] = newSchedule
scheduleExists = true
break
}
}
if scheduleExists {
c.Scheduler.Schedules = append(c.Scheduler.Schedules, newSchedule)
klog.Infof("Schedule: %s has been updated", newSchedule.Name)
klog.Infof("Updated Schedule: %v", newSchedule)
} else {
klog.Infof("Schedule: %s has been added", newSchedule.Name)
klog.Infof("New Schedule: %v", newSchedule)
}
configuration.Config.Scheduler = c.Scheduler
helper.ControllerWg.Add(1)
newSchedule.ExecuteSchedule(c.ActionManager.Actions, c.Converter.DataRead, c.Device.ID)
}
}
//handleScheduleDeleteMessage is the MQTT handler function for deleting schedules at runtime
func (c *Config) handleScheduleDeleteMessage(client MQTT.Client, message MQTT.Message) {
schedulesToBeDeleted := []scheduler.Schedule{}
err := json.Unmarshal(message.Payload(), &schedulesToBeDeleted)
if err != nil {
klog.Errorf("Error in unmarshalling: %s", err)
}
for _, scheduleToBeDeleted := range schedulesToBeDeleted {
scheduleExists := false
for index, schedule := range c.Scheduler.Schedules {
if strings.EqualFold(schedule.Name, scheduleToBeDeleted.Name) {
scheduleExists = true
copy(c.Scheduler.Schedules[index:], c.Scheduler.Schedules[index+1:])
c.Scheduler.Schedules = c.Scheduler.Schedules[:len(c.Scheduler.Schedules)-1]
break
}
}
configuration.Config.Scheduler = c.Scheduler
if !scheduleExists {
klog.Errorf("Schedule: %s does not exist", scheduleToBeDeleted.Name)
} else {
klog.Infof("Schedule: %s has been deleted ", scheduleToBeDeleted.Name)
}
}
}
//handleActionCreateMessage MQTT handler function for adding actions at runtime
func (c *Config) handleActionCreateMessage(client MQTT.Client, message MQTT.Message) {
newActions := []actionmanager.Action{}
err := json.Unmarshal(message.Payload(), &newActions)
if err != nil {
klog.Errorf("Error in unmarshalling: %s", err)
}
for _, newAction := range newActions {
actionExists := false
for actionIndex, action := range c.ActionManager.Actions {
if action.Name == newAction.Name {
c.ActionManager.Actions[actionIndex] = newAction
actionExists = true
break
}
}
if actionExists {
c.ActionManager.Actions = append(c.ActionManager.Actions, newAction)
klog.Infof("Action: %s has been updated", newAction.Name)
klog.Infof("Updated Action: %v", newAction)
} else {
klog.Infof("Action: %s has been added ", newAction.Name)
klog.Infof("New Action: %v", newAction)
}
configuration.Config.ActionManager = c.ActionManager
if newAction.PerformImmediately {
newAction.PerformOperation(c.Converter.DataRead)
}
}
}
//handleActionDeleteMessage MQTT handler function for deleting actions at runtime
func (c *Config) handleActionDeleteMessage(client MQTT.Client, message MQTT.Message) {
actionsToBeDeleted := []actionmanager.Action{}
err := json.Unmarshal(message.Payload(), &actionsToBeDeleted)
if err != nil {
klog.Errorf("Error in unmarshalling: %s", err)
}
for _, actionToBeDeleted := range actionsToBeDeleted {
actionExists := false
for index, action := range c.ActionManager.Actions {
if strings.EqualFold(action.Name, actionToBeDeleted.Name) {
actionExists = true
copy(c.ActionManager.Actions[index:], c.ActionManager.Actions[index+1:])
c.ActionManager.Actions = c.ActionManager.Actions[:len(c.ActionManager.Actions)-1]
break
}
}
configuration.Config.ActionManager = c.ActionManager
if !actionExists {
klog.Errorf("Action: %s did not exist", actionToBeDeleted.Name)
} else {
klog.Infof("Action: %s has been deleted ", actionToBeDeleted.Name)
}
}
}
| 1 | 15,017 | Good catch, and if we don't declare `newSchedule` here, will the last schedule be executed by multiple goroutines? | kubeedge-kubeedge | go |
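
The question above is the classic Go range-variable capture: before Go 1.22 the loop variable is a single variable reused on every iteration, so goroutines started inside the loop can all observe the final element by the time they run. A small self-contained sketch of the failure and of the per-iteration copy used in the patch (`newSchedule := schedule`) follows; the slice contents and names are illustrative only.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	schedules := []string{"a", "b", "c"}
	var wg sync.WaitGroup

	// Buggy under pre-Go 1.22 semantics: every goroutine captures the same
	// loop variable, so they may all print the last element.
	for _, s := range schedules {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("buggy:", s)
		}()
	}
	wg.Wait()

	// Fixed: copy the loop variable once per iteration, the same pattern as
	// `newSchedule := schedule` in the patch, so each goroutine gets its own value.
	for _, s := range schedules {
		s := s
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("fixed:", s)
		}()
	}
	wg.Wait()
}
```

Passing the value as an argument, e.g. `go func(s scheduler.Schedule) { ... }(schedule)`, is an equivalent fix, and Go 1.22 later changed `for` loops to give each iteration its own variable, which removes the hazard at the language level.
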
@@ -879,6 +879,7 @@ partial class Build
.Concat(includeSecurity)
.Select(x => Solution.GetProject(x))
.Where(project =>
+ project != null &&
(project, project.TryGetTargetFrameworks()) switch
{
_ when exclude.Contains(project.Path) => false, | 1 | using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Runtime.InteropServices;
using System.Text.Json;
using System.Text.RegularExpressions;
using Nuke.Common;
using Nuke.Common.IO;
using Nuke.Common.ProjectModel;
using Nuke.Common.Tooling;
using Nuke.Common.Tools.DotNet;
using Nuke.Common.Tools.MSBuild;
using Nuke.Common.Tools.NuGet;
using Nuke.Common.Utilities.Collections;
using static CustomDotNetTasks;
using static Nuke.Common.EnvironmentInfo;
using static Nuke.Common.IO.CompressionTasks;
using static Nuke.Common.IO.FileSystemTasks;
using static Nuke.Common.IO.PathConstruction;
using static Nuke.Common.Tools.DotNet.DotNetTasks;
using static Nuke.Common.Tools.MSBuild.MSBuildTasks;
// #pragma warning disable SA1306
// #pragma warning disable SA1134
// #pragma warning disable SA1111
// #pragma warning disable SA1400
// #pragma warning disable SA1401
partial class Build
{
[Solution("Datadog.Trace.sln")] readonly Solution Solution;
AbsolutePath TracerDirectory => RootDirectory / "tracer";
AbsolutePath SharedDirectory => RootDirectory / "shared";
AbsolutePath ProfilerDirectory => ProfilerSrcDirectory ?? RootDirectory / ".." / "dd-continuous-profiler-dotnet";
AbsolutePath MsBuildProject => TracerDirectory / "Datadog.Trace.proj";
AbsolutePath OutputDirectory => TracerDirectory / "bin";
AbsolutePath TracerHomeDirectory => TracerHome ?? (OutputDirectory / "tracer-home");
AbsolutePath SymbolsDirectory => TracerHome ?? (OutputDirectory / "symbols");
AbsolutePath DDTracerHomeDirectory => DDTracerHome ?? (OutputDirectory / "dd-tracer-home");
AbsolutePath ArtifactsDirectory => Artifacts ?? (OutputDirectory / "artifacts");
AbsolutePath WindowsTracerHomeZip => ArtifactsDirectory / "windows-tracer-home.zip";
AbsolutePath WindowsSymbolsZip => ArtifactsDirectory / "windows-native-symbols.zip";
AbsolutePath BuildDataDirectory => TracerDirectory / "build_data";
AbsolutePath MonitoringHomeDirectory => MonitoringHome ?? (SharedDirectory / "bin" / "monitoring-home");
AbsolutePath ProfilerHomeDirectory => ProfilerHome ?? RootDirectory / ".." / "_build" / "DDProf-Deploy";
const string LibDdwafVersion = "1.0.14";
AbsolutePath LibDdwafDirectory => (NugetPackageDirectory ?? RootDirectory / "packages") / $"libddwaf.{LibDdwafVersion}";
AbsolutePath SourceDirectory => TracerDirectory / "src";
AbsolutePath BuildDirectory => TracerDirectory / "build";
AbsolutePath TestsDirectory => TracerDirectory / "test";
AbsolutePath DistributionHomeDirectory => Solution.GetProject(Projects.DatadogMonitoringDistribution).Directory / "home";
AbsolutePath TempDirectory => (AbsolutePath)(IsWin ? Path.GetTempPath() : "/tmp/");
string TracerLogDirectory => IsWin
? Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData),
"Datadog .NET Tracer", "logs")
: "/var/log/datadog/dotnet/";
readonly string[] WafWindowsArchitectureFolders =
{
"win-x86", "win-x64"
};
Project NativeProfilerProject => Solution.GetProject(Projects.ClrProfilerNative);
Project NativeLoaderProject => Solution.GetProject(Projects.NativeLoader);
[LazyPathExecutable(name: "cmake")] readonly Lazy<Tool> CMake;
[LazyPathExecutable(name: "make")] readonly Lazy<Tool> Make;
[LazyPathExecutable(name: "fpm")] readonly Lazy<Tool> Fpm;
[LazyPathExecutable(name: "gzip")] readonly Lazy<Tool> GZip;
[LazyPathExecutable(name: "cmd")] readonly Lazy<Tool> Cmd;
IEnumerable<MSBuildTargetPlatform> ArchitecturesForPlatform =>
Equals(TargetPlatform, MSBuildTargetPlatform.x64)
? new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 }
: new[] { MSBuildTargetPlatform.x86 };
bool IsArm64 => RuntimeInformation.ProcessArchitecture == Architecture.Arm64;
string LinuxArchitectureIdentifier => IsArm64 ? "arm64" : TargetPlatform.ToString();
IEnumerable<string> LinuxPackageTypes => IsAlpine ? new[] { "tar" } : new[] { "deb", "rpm", "tar" };
IEnumerable<Project> ProjectsToPack => new[]
{
Solution.GetProject(Projects.DatadogTrace),
Solution.GetProject(Projects.DatadogTraceOpenTracing),
};
Project[] ParallelIntegrationTests => new[]
{
Solution.GetProject(Projects.TraceIntegrationTests),
Solution.GetProject(Projects.OpenTracingIntegrationTests),
};
Project[] ClrProfilerIntegrationTests => new[]
{
Solution.GetProject(Projects.ClrProfilerIntegrationTests),
Solution.GetProject(Projects.AppSecIntegrationTests),
};
readonly IEnumerable<TargetFramework> TargetFrameworks = new[]
{
TargetFramework.NET461,
TargetFramework.NETSTANDARD2_0,
TargetFramework.NETCOREAPP3_1,
};
Target CreateRequiredDirectories => _ => _
.Unlisted()
.Executes(() =>
{
EnsureExistingDirectory(TracerHomeDirectory);
EnsureExistingDirectory(ArtifactsDirectory);
EnsureExistingDirectory(DDTracerHomeDirectory);
EnsureExistingDirectory(BuildDataDirectory);
});
Target Restore => _ => _
.After(Clean)
.Unlisted()
.Executes(() =>
{
if (IsWin)
{
NuGetTasks.NuGetRestore(s => s
.SetTargetPath(Solution)
.SetVerbosity(NuGetVerbosity.Normal)
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackagesDirectory(NugetPackageDirectory)));
}
else
{
DotNetRestore(s => s
.SetProjectFile(Solution)
.SetVerbosity(DotNetVerbosity.Normal)
// .SetTargetPlatform(Platform) // necessary to ensure we restore every project
.SetProperty("configuration", BuildConfiguration.ToString())
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackageDirectory(NugetPackageDirectory)));
}
});
Target CompileNativeSrcWindows => _ => _
.Unlisted()
.After(CompileManagedSrc)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
// If we're building for x64, build for x86 too
var platforms =
Equals(TargetPlatform, MSBuildTargetPlatform.x64)
? new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 }
: new[] { MSBuildTargetPlatform.x86 };
// Can't use dotnet msbuild, as it needs to use the VS version of MSBuild
// Build native tracer assets
MSBuild(s => s
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetMSBuildPath()
.SetTargets("BuildCppSrc")
.DisableRestore()
.SetMaxCpuCount(null)
.CombineWith(platforms, (m, platform) => m
.SetTargetPlatform(platform)));
});
Target CompileNativeSrcLinux => _ => _
.Unlisted()
.After(CompileManagedSrc)
.OnlyWhenStatic(() => IsLinux)
.Executes(() =>
{
var buildDirectory = NativeProfilerProject.Directory / "build";
EnsureExistingDirectory(buildDirectory);
CMake.Value(
arguments: "../ -DCMAKE_BUILD_TYPE=Release",
workingDirectory: buildDirectory);
Make.Value(workingDirectory: buildDirectory);
});
Target CompileNativeSrcMacOs => _ => _
.Unlisted()
.After(CompileManagedSrc)
.OnlyWhenStatic(() => IsOsx)
.Executes(() =>
{
var nativeProjectDirectory = NativeProfilerProject.Directory;
CMake.Value(arguments: ".", workingDirectory: nativeProjectDirectory);
Make.Value(workingDirectory: nativeProjectDirectory);
});
Target CompileNativeSrc => _ => _
.Unlisted()
.Description("Compiles the native loader")
.DependsOn(CompileNativeSrcWindows)
.DependsOn(CompileNativeSrcMacOs)
.DependsOn(CompileNativeSrcLinux);
Target CompileManagedSrc => _ => _
.Unlisted()
.Description("Compiles the managed code in the src directory")
.After(CreateRequiredDirectories)
.After(Restore)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetTargetPlatformAnyCPU()
.SetConfiguration(BuildConfiguration)
.DisableRestore()
.SetTargets("BuildCsharpSrc")
);
});
Target CompileNativeTestsWindows => _ => _
.Unlisted()
.After(CompileNativeSrc)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
// If we're building for x64, build for x86 too
var platforms =
Equals(TargetPlatform, MSBuildTargetPlatform.x64)
? new[] { MSBuildTargetPlatform.x64, MSBuildTargetPlatform.x86 }
: new[] { MSBuildTargetPlatform.x86 };
// Can't use dotnet msbuild, as it needs to use the VS version of MSBuild
MSBuild(s => s
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetMSBuildPath()
.SetTargets("BuildCppTests")
.DisableRestore()
.SetMaxCpuCount(null)
.CombineWith(platforms, (m, platform) => m
.SetTargetPlatform(platform)));
});
Target CompileNativeTestsLinux => _ => _
.Unlisted()
.After(CompileNativeSrc)
.OnlyWhenStatic(() => IsLinux)
.Executes(() =>
{
Logger.Error("We don't currently run unit tests on Linux");
});
Target CompileNativeTests => _ => _
.Unlisted()
.Description("Compiles the native loader unit tests")
.DependsOn(CompileNativeTestsWindows)
.DependsOn(CompileNativeTestsLinux);
Target DownloadLibDdwaf => _ => _
.Unlisted()
.After(CreateRequiredDirectories)
.Executes(() =>
{
var wc = new WebClient();
var libDdwafUri = new Uri($"https://www.nuget.org/api/v2/package/libddwaf/{LibDdwafVersion}");
var libDdwafZip = TempDirectory / "libddwaf.zip";
wc.DownloadFile(libDdwafUri, libDdwafZip);
Console.WriteLine($"{libDdwafZip} downloaded. Extracting to {LibDdwafDirectory}...");
UncompressZip(libDdwafZip, LibDdwafDirectory);
});
Target CopyLibDdwaf => _ => _
.Unlisted()
.After(Clean)
.After(DownloadLibDdwaf)
.OnlyWhenStatic(() => !IsArm64) // not supported yet
.Executes(() =>
{
if (IsWin)
{
foreach (var architecture in WafWindowsArchitectureFolders)
{
var source = LibDdwafDirectory / "runtimes" / architecture / "native" / "ddwaf.dll";
var dest = TracerHomeDirectory / architecture;
Logger.Info($"Copying '{source}' to '{dest}'");
CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite);
}
}
else
{
var (architecture, ext) = GetUnixArchitectureAndExtension();
var ddwafFileName = $"libddwaf.{ext}";
var source = LibDdwafDirectory / "runtimes" / architecture / "native" / ddwafFileName;
var dest = TracerHomeDirectory;
Logger.Info($"Copying '{source}' to '{dest}'");
CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite);
}
});
Target CopyLibDdwafForAppSecUnitTests => _ => _
.Unlisted()
.After(Clean)
.After(DownloadLibDdwaf)
.OnlyWhenStatic(() => !IsArm64)// not supported yet
.Executes(() =>
{
var project = Solution.GetProject(Projects.AppSecUnitTests);
var directory = project.Directory;
var targetFrameworks = project.GetTargetFrameworks();
if (IsWin)
{
foreach (var architecture in WafWindowsArchitectureFolders)
{
CopyWaf(architecture, targetFrameworks, directory, "ddwaf", "dll");
}
}
else
{
var (architecture, ext) = GetUnixArchitectureAndExtension();
CopyWaf(architecture, targetFrameworks, directory, "libddwaf", ext);
}
void CopyWaf(string architecture, IEnumerable<string> frameworks, AbsolutePath absolutePath, string wafFileName, string extension)
{
var source = LibDdwafDirectory / "runtimes" / architecture / "native" / $"{wafFileName}.{extension}";
var nativeDir = DDTracerHomeDirectory / architecture / $"Datadog.Trace.ClrProfiler.Native.{extension}";
foreach (var fmk in frameworks)
{
var dest = absolutePath / "bin" / BuildConfiguration / fmk / architecture;
CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite);
if (!IsWin)
{
CopyFileToDirectory(nativeDir, absolutePath / "bin" / BuildConfiguration / fmk, FileExistsPolicy.Overwrite);
}
}
}
});
Target PublishManagedProfiler => _ => _
.Unlisted()
.After(CompileManagedSrc)
.Executes(() =>
{
var targetFrameworks = IsWin
? TargetFrameworks
: TargetFrameworks.Where(framework => !framework.ToString().StartsWith("net4"));
// Publish Datadog.Trace.MSBuild which includes Datadog.Trace and Datadog.Trace.AspNet
DotNetPublish(s => s
.SetProject(Solution.GetProject(Projects.DatadogTraceMsBuild))
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.EnableNoBuild()
.EnableNoRestore()
.CombineWith(targetFrameworks, (p, framework) => p
.SetFramework(framework)
.SetOutput(TracerHomeDirectory / framework)));
});
Target PublishNativeSymbolsWindows => _ => _
.Unlisted()
.OnlyWhenStatic(() => IsWin)
.After(CompileNativeSrc, PublishManagedProfiler)
.Executes(() =>
{
foreach (var architecture in ArchitecturesForPlatform)
{
var source = NativeProfilerProject.Directory / "bin" / BuildConfiguration / architecture.ToString() /
$"{NativeProfilerProject.Name}.pdb";
var dest = SymbolsDirectory / $"win-{architecture}";
Logger.Info($"Copying '{source}' to '{dest}'");
CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite);
}
});
Target PublishNativeProfilerWindows => _ => _
.Unlisted()
.OnlyWhenStatic(() => IsWin)
.After(CompileNativeSrc, PublishManagedProfiler)
.Executes(() =>
{
foreach (var architecture in ArchitecturesForPlatform)
{
// Copy native tracer assets
var source = NativeProfilerProject.Directory / "bin" / BuildConfiguration / architecture.ToString() /
$"{NativeProfilerProject.Name}.dll";
var dest = TracerHomeDirectory / $"win-{architecture}";
Logger.Info($"Copying '{source}' to '{dest}'");
CopyFileToDirectory(source, dest, FileExistsPolicy.Overwrite);
}
});
Target PublishNativeProfilerLinux => _ => _
.Unlisted()
.OnlyWhenStatic(() => IsLinux)
.After(CompileNativeSrc, PublishManagedProfiler)
.Executes(() =>
{
// copy createLogPath.sh
CopyFileToDirectory(
BuildDirectory / "artifacts" / "createLogPath.sh",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
// Copy Native file
CopyFileToDirectory(
NativeProfilerProject.Directory / "build" / "bin" / $"{NativeProfilerProject.Name}.so",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
});
Target PublishNativeProfilerMacOs => _ => _
.Unlisted()
.OnlyWhenStatic(() => IsOsx)
.After(CompileNativeSrc, PublishManagedProfiler)
.Executes(() =>
{
// copy createLogPath.sh
CopyFileToDirectory(
BuildDirectory / "artifacts" / "createLogPath.sh",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
// Create home directory
CopyFileToDirectory(
NativeProfilerProject.Directory / "bin" / $"{NativeProfilerProject.Name}.dylib",
TracerHomeDirectory,
FileExistsPolicy.Overwrite);
});
Target PublishNativeProfiler => _ => _
.Unlisted()
.DependsOn(PublishNativeProfilerWindows)
.DependsOn(PublishNativeProfilerLinux)
.DependsOn(PublishNativeProfilerMacOs);
Target CreateDdTracerHome => _ => _
.Unlisted()
.After(PublishNativeProfiler, PublishManagedProfiler, DownloadLibDdwaf, CopyLibDdwaf)
.Executes(() =>
{
// start by copying everything from the tracer home dir
CopyDirectoryRecursively(TracerHomeDirectory, DDTracerHomeDirectory, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
if (IsWin)
{
// windows already has the expected layout
return;
}
// Move the native file to the architecture-specific folder
var (architecture, ext) = GetUnixArchitectureAndExtension();
var profilerFileName = $"{NativeProfilerProject.Name}.{ext}";
var ddwafFileName = $"libddwaf.{ext}";
var outputDir = DDTracerHomeDirectory / architecture;
EnsureCleanDirectory(outputDir);
MoveFile(
DDTracerHomeDirectory / profilerFileName,
outputDir / profilerFileName);
// won't exist yet for arm64 builds
var srcDdwafFile = DDTracerHomeDirectory / ddwafFileName;
if (File.Exists(srcDdwafFile))
{
MoveFile(
srcDdwafFile,
DDTracerHomeDirectory / architecture / ddwafFileName);
}
});
Target BuildMsi => _ => _
.Unlisted()
.Description("Builds the .msi files from the compiled tracer home directory")
.After(BuildTracerHome)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
MSBuild(s => s
.SetTargetPath(Solution.GetProject(Projects.WindowsInstaller))
.SetConfiguration(BuildConfiguration)
.SetMSBuildPath()
.AddProperty("RunWixToolsOutOfProc", true)
.SetProperty("TracerHomeDirectory", TracerHomeDirectory)
.SetProperty("LibDdwafDirectory", LibDdwafDirectory)
.SetMaxCpuCount(null)
.CombineWith(ArchitecturesForPlatform, (o, arch) => o
.SetProperty("MsiOutputPath", ArtifactsDirectory / arch.ToString())
.SetTargetPlatform(arch)),
degreeOfParallelism: 2);
});
Target BuildMsiBeta => _ => _
.Unlisted()
.Description("Builds the .msi files from the repo")
.After(BuildTracerHome, BuildProfilerHome, BuildMonitoringHome)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
MSBuild(s => s
.SetTargetPath(SharedDirectory / "src" / "msi-installer" / "WindowsInstaller.wixproj")
.SetConfiguration(BuildConfiguration)
.SetMSBuildPath()
.AddProperty("RunWixToolsOutOfProc", true)
.SetProperty("TracerHomeDirectory", TracerHomeDirectory)
.SetProperty("LibDdwafDirectory", LibDdwafDirectory)
.SetProperty("ProfilerHomeDirectory", ProfilerHomeDirectory)
.SetProperty("MonitoringHomeDirectory", MonitoringHomeDirectory)
.SetProperty("BetaMsiSuffix", BetaMsiSuffix)
.SetMaxCpuCount(null)
.CombineWith(ArchitecturesForPlatform, (o, arch) => o
.SetProperty("MsiOutputPath", ArtifactsDirectory / arch.ToString())
.SetTargetPlatform(arch)),
degreeOfParallelism: 2);
});
Target CreateDistributionHome => _ => _
.Unlisted()
.After(BuildTracerHome)
.Executes(() =>
{
// Copy existing files from tracer home to the Distribution location
CopyDirectoryRecursively(TracerHomeDirectory, DistributionHomeDirectory, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
// Ensure createLogPath.sh is copied to the directory
CopyFileToDirectory(
BuildDirectory / "artifacts" / "createLogPath.sh",
DistributionHomeDirectory,
FileExistsPolicy.Overwrite);
});
/// <summary>
/// This target is a bit of a hack, but means that we actually use the All CPU builds in integration tests etc
/// </summary>
Target CreatePlatformlessSymlinks => _ => _
.Description("Copies the build output from 'All CPU' platforms to platform-specific folders")
.Unlisted()
.After(CompileManagedSrc)
.After(CompileDependencyLibs)
.After(CompileManagedTestHelpers)
.Executes(() =>
{
// create junction for each directory
var directories = TracerDirectory.GlobDirectories(
$"src/**/bin/{BuildConfiguration}",
$"tools/**/bin/{BuildConfiguration}",
$"test/Datadog.Trace.TestHelpers/**/bin/{BuildConfiguration}",
$"test/test-applications/integrations/dependency-libs/**/bin/{BuildConfiguration}"
);
directories.ForEach(existingDir =>
{
var newDir = existingDir.Parent / $"{TargetPlatform}" / BuildConfiguration;
if (DirectoryExists(newDir))
{
Logger.Info($"Skipping '{newDir}' as already exists");
}
else
{
EnsureExistingDirectory(newDir.Parent);
Cmd.Value(arguments: $"cmd /c mklink /J \"{newDir}\" \"{existingDir}\"");
}
});
});
Target ZipSymbols => _ => _
.Unlisted()
.After(BuildTracerHome)
.DependsOn(PublishNativeSymbolsWindows)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
CompressZip(SymbolsDirectory, WindowsSymbolsZip, fileMode: FileMode.Create);
});
Target ZipTracerHome => _ => _
.Unlisted()
.After(BuildTracerHome)
.Requires(() => Version)
.Executes(() =>
{
if (IsWin)
{
CompressZip(TracerHomeDirectory, WindowsTracerHomeZip, fileMode: FileMode.Create);
}
else if (IsLinux)
{
var fpm = Fpm.Value;
var gzip = GZip.Value;
var packageName = "datadog-dotnet-apm";
var workingDirectory = ArtifactsDirectory / $"linux-{LinuxArchitectureIdentifier}";
EnsureCleanDirectory(workingDirectory);
foreach (var packageType in LinuxPackageTypes)
{
var args = new List<string>()
{
"-f",
"-s dir",
$"-t {packageType}",
$"-n {packageName}",
$"-v {Version}",
packageType == "tar" ? "" : "--prefix /opt/datadog",
$"--chdir {TracerHomeDirectory}",
"netstandard2.0/",
"netcoreapp3.1/",
"Datadog.Trace.ClrProfiler.Native.so",
"createLogPath.sh",
};
if (!IsArm64)
{
args.Add("libddwaf.so");
}
var arguments = string.Join(" ", args);
fpm(arguments, workingDirectory: workingDirectory);
}
gzip($"-f {packageName}.tar", workingDirectory: workingDirectory);
var suffix = RuntimeInformation.ProcessArchitecture == Architecture.X64
? string.Empty
: $".{RuntimeInformation.ProcessArchitecture.ToString().ToLower()}";
var versionedName = IsAlpine
? $"{packageName}-{Version}-musl{suffix}.tar.gz"
: $"{packageName}-{Version}{suffix}.tar.gz";
RenameFile(
workingDirectory / $"{packageName}.tar.gz",
workingDirectory / versionedName);
}
});
Target CompileManagedTestHelpers => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.SetProperty("BuildProjectReferences", false)
.SetTargets("BuildCsharpTestHelpers"));
});
Target CompileManagedUnitTests => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.DependsOn(CopyLibDdwafForAppSecUnitTests)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.SetProperty("BuildProjectReferences", false)
.SetTargets("BuildCsharpUnitTests"));
});
Target RunManagedUnitTests => _ => _
.Unlisted()
.After(CompileManagedUnitTests)
.Executes(() =>
{
var testProjects = TracerDirectory.GlobFiles("test/**/*.Tests.csproj")
.Select(x => Solution.GetProject(x))
.ToList();
testProjects.ForEach(EnsureResultsDirectory);
var filter = string.IsNullOrEmpty(Filter) && IsArm64 ? "(Category!=ArmUnsupported)" : Filter;
try
{
DotNetTest(x => x
.EnableNoRestore()
.EnableNoBuild()
.SetFilter(filter)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.SetDDEnvironmentVariables("dd-tracer-dotnet")
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(testProjects, (x, project) => x
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project)));
}
finally
{
MoveLogsToBuildData();
}
});
Target RunNativeTestsWindows => _ => _
.Unlisted()
.After(CompileNativeSrcWindows)
.After(CompileNativeTestsWindows)
.OnlyWhenStatic(() => IsWin)
.Executes(() =>
{
var workingDirectory = TestsDirectory / "Datadog.Trace.ClrProfiler.Native.Tests" / "bin" / BuildConfiguration.ToString() / TargetPlatform.ToString();
var exePath = workingDirectory / "Datadog.Trace.ClrProfiler.Native.Tests.exe";
var testExe = ToolResolver.GetLocalTool(exePath);
testExe("--gtest_output=xml", workingDirectory: workingDirectory);
});
Target RunNativeTestsLinux => _ => _
.Unlisted()
.After(CompileNativeSrcLinux)
.After(CompileNativeTestsLinux)
.OnlyWhenStatic(() => IsLinux)
.Executes(() =>
{
Logger.Error("We don't currently run unit tests on Linux");
});
Target RunNativeTests => _ => _
.Unlisted()
.DependsOn(RunNativeTestsWindows)
.DependsOn(RunNativeTestsLinux);
Target CompileDependencyLibs => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.Executes(() =>
{
// Always AnyCPU
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.EnableNoDependencies()
.SetTargets("BuildDependencyLibs")
);
});
Target CompileRegressionDependencyLibs => _ => _
.Unlisted()
.After(Restore)
.After(CompileManagedSrc)
.Executes(() =>
{
// We run linux integration tests in AnyCPU, but Windows on the specific architecture
var platform = !IsWin ? MSBuildTargetPlatform.MSIL : TargetPlatform;
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetTargetPlatformAnyCPU()
.DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(platform)
.SetTargets("BuildRegressionDependencyLibs")
);
});
Target CompileRegressionSamples => _ => _
.Unlisted()
.After(Restore)
.After(CreatePlatformlessSymlinks)
.After(CompileRegressionDependencyLibs)
.Requires(() => Framework)
.Executes(() =>
{
var regressionsDirectory = Solution.GetProject(Projects.AutomapperTest)
.Directory.Parent;
var regressionLibs = GlobFiles(regressionsDirectory / "**" / "*.csproj")
.Where(path =>
(path, Solution.GetProject(path).TryGetTargetFrameworks()) switch
{
_ when path.Contains("ExpenseItDemo") => false,
_ when path.Contains("StackExchange.Redis.AssemblyConflict.LegacyProject") => false,
_ when path.Contains("MismatchedTracerVersions") => false,
_ when path.Contains("dependency-libs") => false,
_ when !string.IsNullOrWhiteSpace(SampleName) => path.Contains(SampleName),
(_, var targets) when targets is not null => targets.Contains(Framework),
_ => true,
}
);
// Allow restore here, otherwise things go wonky with runtime identifiers
// in some target frameworks. No, I don't know why
DotNetBuild(x => x
// .EnableNoRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetFramework(Framework)
.SetNoWarnDotNetCore3()
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackageDirectory(NugetPackageDirectory))
.CombineWith(regressionLibs, (x, project) => x
.SetProjectFile(project)));
});
Target CompileFrameworkReproductions => _ => _
.Unlisted()
.Description("Builds .NET Framework projects (non SDK-based projects)")
.After(CompileRegressionDependencyLibs)
.After(CompileDependencyLibs)
.After(CreatePlatformlessSymlinks)
.Requires(() => IsWin)
.Executes(() =>
{
// We have to use the full MSBuild here, as dotnet msbuild doesn't copy the EDMX assets for embedding correctly
// seems similar to https://github.com/dotnet/sdk/issues/8360
MSBuild(s => s
.SetTargetPath(MsBuildProject)
.SetMSBuildPath()
.DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetTargets("BuildFrameworkReproductions")
.SetMaxCpuCount(null));
});
Target CompileIntegrationTests => _ => _
.Unlisted()
.After(CompileManagedSrc)
.After(CompileRegressionSamples)
.After(CompileFrameworkReproductions)
.After(PublishIisSamples)
.Requires(() => Framework)
.Requires(() => TracerHomeDirectory != null)
.Executes(() =>
{
DotNetMSBuild(s => s
.SetTargetPath(MsBuildProject)
.SetProperty("TargetFramework", Framework.ToString())
.DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetTargets("BuildCsharpIntegrationTests")
.SetMaxCpuCount(null));
});
Target CompileSamples => _ => _
.Unlisted()
.After(CompileDependencyLibs)
.After(CreatePlatformlessSymlinks)
.After(CompileFrameworkReproductions)
.Requires(() => TracerHomeDirectory != null)
.Requires(() => Framework)
.Executes(() =>
{
// This does some "unnecessary" rebuilding and restoring
var includeIntegration = TracerDirectory.GlobFiles("test/test-applications/integrations/**/*.csproj");
// Don't build aspnet full framework sample in this step
var includeSecurity = TracerDirectory.GlobFiles("test/test-applications/security/*/*.csproj");
var exclude = TracerDirectory.GlobFiles("test/test-applications/integrations/dependency-libs/**/*.csproj");
var projects = includeIntegration
.Concat(includeSecurity)
.Select(x => Solution.GetProject(x))
.Where(project =>
(project, project.TryGetTargetFrameworks()) switch
{
_ when exclude.Contains(project.Path) => false,
_ when project.Path.ToString().Contains("Samples.OracleMDA") => false,
_ when !string.IsNullOrWhiteSpace(SampleName) => project.Path.ToString().Contains(SampleName),
(_, var targets) when targets is not null => targets.Contains(Framework),
_ => true,
}
);
// /nowarn:NU1701 - Package 'x' was restored using '.NETFramework,Version=v4.6.1' instead of the project target framework '.NETCoreApp,Version=v2.1'.
DotNetBuild(config => config
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.EnableNoDependencies()
.SetProperty("BuildInParallel", "false")
.SetProcessArgumentConfigurator(arg => arg.Add("/nowarn:NU1701"))
.CombineWith(projects, (s, project) => s
// we have to build this one for all frameworks (because of reasons)
.When(!project.Name.Contains("MultiDomainHost"), x => x.SetFramework(Framework))
.SetProjectFile(project)));
});
Target PublishIisSamples => _ => _
.Unlisted()
.After(CompileManagedTestHelpers)
.After(CompileRegressionSamples)
.After(CompileFrameworkReproductions)
.Executes(() =>
{
var aspnetFolder = TestsDirectory / "test-applications" / "aspnet";
var securityAspnetFolder = TestsDirectory / "test-applications" / "security" / "aspnet";
var aspnetProjects = aspnetFolder.GlobFiles("**/*.csproj");
var securityAspnetProjects = securityAspnetFolder.GlobFiles("**/*.csproj");
var publishProfile = aspnetFolder / "PublishProfiles" / "FolderProfile.pubxml";
MSBuild(x => x
.SetMSBuildPath()
// .DisableRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetProperty("DeployOnBuild", true)
.SetProperty("PublishProfile", publishProfile)
.SetMaxCpuCount(null)
.CombineWith(aspnetProjects.Concat(securityAspnetProjects), (c, project) => c
.SetTargetPath(project))
);
});
Target RunWindowsIntegrationTests => _ => _
.Unlisted()
.After(BuildTracerHome)
.After(CompileIntegrationTests)
.After(CompileSamples)
.After(CompileFrameworkReproductions)
.After(BuildWindowsIntegrationTests)
.Requires(() => IsWin)
.Requires(() => Framework)
.Executes(() =>
{
ParallelIntegrationTests.ForEach(EnsureResultsDirectory);
ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);
try
{
DotNetTest(config => config
.SetDotnetPath(TargetPlatform)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetFramework(Framework)
//.WithMemoryDumpAfter(timeoutInMinutes: 30)
.EnableNoRestore()
.EnableNoBuild()
.SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory)
.When(!string.IsNullOrEmpty(Filter), c => c.SetFilter(Filter))
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(ParallelIntegrationTests, (s, project) => s
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project)), degreeOfParallelism: 4);
// TODO: I think we should change this filter to run on Windows by default
// (RunOnWindows!=False|Category=Smoke)&LoadFromGAC!=True&IIS!=True
DotNetTest(config => config
.SetDotnetPath(TargetPlatform)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetFramework(Framework)
//.WithMemoryDumpAfter(timeoutInMinutes: 30)
.EnableNoRestore()
.EnableNoBuild()
.SetFilter(Filter ?? "RunOnWindows=True&LoadFromGAC!=True&IIS!=True")
.SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory)
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(ClrProfilerIntegrationTests, (s, project) => s
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project)));
}
finally
{
MoveLogsToBuildData();
CopyMemoryDumps();
}
});
Target RunWindowsRegressionTests => _ => _
.Unlisted()
.After(BuildTracerHome)
.After(CompileIntegrationTests)
.After(CompileRegressionSamples)
.After(CompileFrameworkReproductions)
.Requires(() => IsWin)
.Requires(() => Framework)
.Executes(() =>
{
ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);
try
{
DotNetTest(config => config
.SetDotnetPath(TargetPlatform)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetFramework(Framework)
.EnableNoRestore()
.EnableNoBuild()
.SetFilter(Filter ?? "Category=Smoke&LoadFromGAC!=True")
.SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory)
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(ClrProfilerIntegrationTests, (s, project) => s
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project)));
}
finally
{
MoveLogsToBuildData();
}
});
Target RunWindowsIisIntegrationTests => _ => _
.After(BuildTracerHome)
.After(CompileIntegrationTests)
.After(CompileSamples)
.After(CompileFrameworkReproductions)
.After(PublishIisSamples)
.Requires(() => Framework)
.Executes(() =>
{
ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);
try
{
// Different filter from RunWindowsIntegrationTests
DotNetTest(config => config
.SetDotnetPath(TargetPlatform)
.SetConfiguration(BuildConfiguration)
.SetTargetPlatform(TargetPlatform)
.SetFramework(Framework)
.EnableNoRestore()
.EnableNoBuild()
.SetFilter(Filter ?? "(RunOnWindows=True)&LoadFromGAC=True")
.SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory)
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(ClrProfilerIntegrationTests, (s, project) => s
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project)));
}
finally
{
MoveLogsToBuildData();
}
});
Target CompileSamplesLinux => _ => _
.Unlisted()
.After(CompileManagedSrc)
.After(CompileRegressionDependencyLibs)
.After(CompileDependencyLibs)
.After(CompileManagedTestHelpers)
.Requires(() => TracerHomeDirectory != null)
.Requires(() => Framework)
.Executes(() =>
{
// There's nothing specifically linux-y here, it's just that we only build a subset of projects
// for testing on linux.
var sampleProjects = TracerDirectory.GlobFiles("test/test-applications/integrations/*/*.csproj");
var securitySampleProjects = TracerDirectory.GlobFiles("test/test-applications/security/*/*.csproj");
var regressionProjects = TracerDirectory.GlobFiles("test/test-applications/regression/*/*.csproj");
var instrumentationProjects = TracerDirectory.GlobFiles("test/test-applications/instrumentation/*/*.csproj");
// These samples are currently skipped.
var projectsToSkip = new[]
{
"Samples.Msmq", // Doesn't run on Linux
"Samples.Owin.WebApi2", // Doesn't run on Linux
"Samples.MultiDomainHost.Runner",
"Samples.RateLimiter", // I think we _should_ run this one (assuming it has tests)
"Samples.SqlServer.NetFramework20",
"Samples.TracingWithoutLimits", // I think we _should_ run this one (assuming it has tests)
"Samples.Wcf",
"Samples.WebRequest.NetFramework20",
"AutomapperTest", // I think we _should_ run this one (assuming it has tests)
"DogStatsD.RaceCondition",
"LargePayload", // I think we _should_ run this one (assuming it has tests)
"Sandbox.ManualTracing",
"StackExchange.Redis.AssemblyConflict.LegacyProject",
"Samples.OracleMDA", // We don't test these yet
"Samples.OracleMDA.Core", // We don't test these yet
"MismatchedTracerVersions",
};
// These sample projects are built using RestoreAndBuildSamplesForPackageVersions
// so no point building them now
var multiPackageProjects = new List<string>();
if (TestAllPackageVersions)
{
var samplesFile = BuildDirectory / "PackageVersionsGeneratorDefinitions.json";
using var fs = File.OpenRead(samplesFile);
var json = JsonDocument.Parse(fs);
multiPackageProjects = json.RootElement
.EnumerateArray()
.Select(e => e.GetProperty("SampleProjectName").GetString())
.Distinct()
.Where(name => name switch
{
"Samples.MySql" => false, // the "non package version" is _ALSO_ tested separately
_ => true
})
.ToList();
}
var projectsToBuild = sampleProjects
.Concat(securitySampleProjects)
.Concat(regressionProjects)
.Concat(instrumentationProjects)
.Where(path =>
{
var project = Solution.GetProject(path);
return project?.Name switch
{
"Samples.AspNetCoreMvc21" => Framework == TargetFramework.NETCOREAPP2_1,
"Samples.AspNetCoreMvc30" => Framework == TargetFramework.NETCOREAPP3_0,
"Samples.AspNetCoreMvc31" => Framework == TargetFramework.NETCOREAPP3_1,
"Samples.AspNetCoreMinimalApis" => Framework == TargetFramework.NET6_0,
"Samples.AspNetCore2" => Framework == TargetFramework.NETCOREAPP2_1,
"Samples.AspNetCore5" => Framework == TargetFramework.NET6_0 || Framework == TargetFramework.NET5_0 || Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NETCOREAPP3_0,
"Samples.GraphQL4" => Framework == TargetFramework.NETCOREAPP3_1 || Framework == TargetFramework.NET5_0 || Framework == TargetFramework.NET6_0,
var name when projectsToSkip.Contains(name) => false,
var name when multiPackageProjects.Contains(name) => false,
"Samples.AspNetCoreRazorPages" => true,
_ when !string.IsNullOrWhiteSpace(SampleName) => project?.Name?.Contains(SampleName) ?? false,
_ => true,
};
});
// do the build and publish separately to avoid dependency issues
// Always AnyCPU
DotNetBuild(x => x
// .EnableNoRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetFramework(Framework)
// .SetTargetPlatform(Platform)
.SetNoWarnDotNetCore3()
.When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true"))
.When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true"))
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory))
.CombineWith(projectsToBuild, (c, project) => c
.SetProjectFile(project)));
// Always AnyCPU
DotNetPublish(x => x
.EnableNoRestore()
.EnableNoBuild()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetFramework(Framework)
// .SetTargetPlatform(Platform)
.SetNoWarnDotNetCore3()
.When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true"))
.When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true"))
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o => o.SetPackageDirectory(NugetPackageDirectory))
.CombineWith(projectsToBuild, (c, project) => c
.SetProject(project)));
});
Target CompileMultiApiPackageVersionSamples => _ => _
.Unlisted()
.After(CompileManagedSrc)
.After(CompileRegressionDependencyLibs)
.After(CompileDependencyLibs)
.After(CompileManagedTestHelpers)
.After(CompileSamplesLinux)
.Requires(() => TracerHomeDirectory != null)
.Requires(() => Framework)
.Executes(() =>
{
// Build and restore for all versions
// Annoyingly this rebuilds everything again and again.
var targets = new[] { "RestoreSamplesForPackageVersionsOnly", "RestoreAndBuildSamplesForPackageVersionsOnly" };
// /nowarn:NU1701 - Package 'x' was restored using '.NETFramework,Version=v4.6.1' instead of the project target framework '.NETCoreApp,Version=v2.1'.
// /nowarn:NETSDK1138 - The target framework 'x' is out of support and will not receive security updates in the future.
DotNetMSBuild(x => x
.SetTargetPath(MsBuildProject)
.SetConfiguration(BuildConfiguration)
.EnableNoDependencies()
.SetProperty("TargetFramework", Framework.ToString())
.SetProperty("BuildInParallel", "true")
.SetProperty("CheckEolTargetFramework", "false")
.SetProcessArgumentConfigurator(arg => arg.Add("/nowarn:NU1701"))
.When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true"))
.When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true"))
.CombineWith(targets, (c, target) => c.SetTargets(target))
);
});
Target CompileLinuxIntegrationTests => _ => _
.Unlisted()
.After(CompileManagedSrc)
.After(CompileRegressionDependencyLibs)
.After(CompileDependencyLibs)
.After(CompileManagedTestHelpers)
.After(CompileSamplesLinux)
.After(CompileMultiApiPackageVersionSamples)
.Requires(() => TracerHomeDirectory != null)
.Requires(() => Framework)
.Executes(() =>
{
// Build the actual integration test projects for Any CPU
var integrationTestProjects = TracerDirectory.GlobFiles("test/*.IntegrationTests/*.csproj");
DotNetBuild(x => x
// .EnableNoRestore()
.EnableNoDependencies()
.SetConfiguration(BuildConfiguration)
.SetFramework(Framework)
// .SetTargetPlatform(Platform)
.SetNoWarnDotNetCore3()
.When(TestAllPackageVersions, o => o.SetProperty("TestAllPackageVersions", "true"))
.When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true"))
.When(!string.IsNullOrEmpty(NugetPackageDirectory), o =>
o.SetPackageDirectory(NugetPackageDirectory))
.CombineWith(integrationTestProjects, (c, project) => c
.SetProjectFile(project)));
IntegrationTestLinuxProfilerDirFudge(Projects.ClrProfilerIntegrationTests);
IntegrationTestLinuxProfilerDirFudge(Projects.AppSecIntegrationTests);
});
Target RunLinuxIntegrationTests => _ => _
.After(CompileLinuxIntegrationTests)
.Description("Runs the linux integration tests")
.Requires(() => Framework)
.Requires(() => !IsWin)
.Executes(() =>
{
ParallelIntegrationTests.ForEach(EnsureResultsDirectory);
ClrProfilerIntegrationTests.ForEach(EnsureResultsDirectory);
var filter = (string.IsNullOrEmpty(Filter), IsArm64) switch
{
(true, false) => "Category!=LinuxUnsupported",
(true, true) => "(Category!=ArmUnsupported)&(Category!=LinuxUnsupported)",
_ => Filter
};
try
{
// Run these ones in parallel
// Always AnyCPU
DotNetTest(config => config
.SetConfiguration(BuildConfiguration)
// .SetTargetPlatform(Platform)
.EnableNoRestore()
.EnableNoBuild()
.SetFramework(Framework)
//.WithMemoryDumpAfter(timeoutInMinutes: 30)
.SetFilter(filter)
.SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory)
.When(TestAllPackageVersions, o => o.SetProcessEnvironmentVariable("TestAllPackageVersions", "true"))
.When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true"))
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(ParallelIntegrationTests, (s, project) => s
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project)),
degreeOfParallelism: 2);
// Run this one separately so we can tail output
DotNetTest(config => config
.SetConfiguration(BuildConfiguration)
// .SetTargetPlatform(Platform)
.EnableNoRestore()
.EnableNoBuild()
.SetFramework(Framework)
//.WithMemoryDumpAfter(timeoutInMinutes: 30)
.SetFilter(filter)
.SetProcessEnvironmentVariable("TracerHomeDirectory", TracerHomeDirectory)
.When(TestAllPackageVersions, o => o.SetProcessEnvironmentVariable("TestAllPackageVersions", "true"))
.When(IncludeMinorPackageVersions, o => o.SetProperty("IncludeMinorPackageVersions", "true"))
.When(CodeCoverage, ConfigureCodeCoverage)
.CombineWith(ClrProfilerIntegrationTests, (s, project) => s
.EnableTrxLogOutput(GetResultsDirectory(project))
.SetProjectFile(project))
);
}
finally
{
MoveLogsToBuildData();
CopyMemoryDumps();
}
});
Target CheckBuildLogsForErrors => _ => _
.Unlisted()
.Description("Reads the logs from build_data and checks for error lines")
.Executes(() =>
{
// we expect to see _some_ errors, so explicitly ignore them
var knownPatterns = new List<Regex>
{
new(@".*Unable to resolve method MongoDB\..*", RegexOptions.Compiled),
new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsIntegration\.OnAsyncMethodEnd.*", RegexOptions.Compiled),
new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsIntegration\.OnMethodBegin.*", RegexOptions.Compiled),
new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsIntegration\.OnMethodEnd.*", RegexOptions.Compiled),
new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsVoidIntegration\.OnMethodBegin.*", RegexOptions.Compiled),
new(@".*at CallTargetNativeTest\.NoOp\.Noop\dArgumentsVoidIntegration\.OnMethodEnd.*", RegexOptions.Compiled),
};
var logDirectory = BuildDataDirectory / "logs";
if (DirectoryExists(logDirectory))
{
// Should we care about warnings too?
var managedErrors = logDirectory.GlobFiles("**/dotnet-tracer-managed-*")
.SelectMany(ParseManagedLogFiles)
.Where(x => x.Level >= LogLevel.Error)
.Where(IsNewError)
.ToList();
var nativeErrors = logDirectory.GlobFiles("**/dotnet-tracer-native-*")
.SelectMany(ParseNativeLogFiles)
.Where(x => x.Level >= LogLevel.Error)
.Where(IsNewError)
.ToList();
if (managedErrors.Count == 0 && nativeErrors.Count == 0)
{
Logger.Info("No errors found in managed or native logs");
return;
}
Logger.Warn("Found the following errors in log files:");
var allErrors = managedErrors
.Concat(nativeErrors)
.GroupBy(x => x.FileName);
foreach (var erroredFile in allErrors)
{
Logger.Error($"Found errors in log file '{erroredFile.Key}':");
foreach (var error in erroredFile)
{
Logger.Error($"{error.Timestamp:hh:mm:ss} [{error.Level}] {error.Message}");
}
}
ExitCode = 1;
}
bool IsNewError(ParsedLogLine logLine)
{
foreach (var pattern in knownPatterns)
{
if (pattern.IsMatch(logLine.Message))
{
return false;
}
}
return true;
}
static List<ParsedLogLine> ParseManagedLogFiles(AbsolutePath logFile)
{
var regex = new Regex(@"^(\d\d\d\d\-\d\d\-\d\d\W\d\d\:\d\d\:\d\d\.\d\d\d\W\+\d\d\:\d\d)\W\[(.*?)\]\W(.*)", RegexOptions.Compiled);
var allLines = File.ReadAllLines(logFile);
var allLogs = new List<ParsedLogLine>(allLines.Length);
ParsedLogLine currentLine = null;
foreach (var line in allLines)
{
if (string.IsNullOrWhiteSpace(line))
{
continue;
}
var match = regex.Match(line);
if (match.Success)
{
if (currentLine is not null)
{
allLogs.Add(currentLine);
}
try
{
// start of a new log line
var timestamp = DateTimeOffset.Parse(match.Groups[1].Value);
var level = ParseManagedLogLevel(match.Groups[2].Value);
var message = match.Groups[3].Value;
currentLine = new ParsedLogLine(timestamp, level, message, logFile);
}
catch (Exception ex)
{
Logger.Info($"Error parsing line: '{line}. {ex}");
}
}
else
{
if (currentLine is null)
{
Logger.Warn("Incomplete log line: " + line);
}
else
{
currentLine = currentLine with { Message = $"{currentLine.Message}{Environment.NewLine}{line}" };
}
}
}
return allLogs;
}
static List<ParsedLogLine> ParseNativeLogFiles(AbsolutePath logFile)
{
var regex = new Regex(@"^(\d\d\/\d\d\/\d\d\W\d\d\:\d\d\:\d\d\.\d\d\d\W\w\w)\W\[.*?\]\W\[(.*?)\](.*)", RegexOptions.Compiled);
var allLines = File.ReadAllLines(logFile);
var allLogs = new List<ParsedLogLine>(allLines.Length);
foreach (var line in allLines)
{
if (string.IsNullOrWhiteSpace(line))
{
continue;
}
var match = regex.Match(line);
if (match.Success)
{
try
{
// native logs are on one line
var timestamp = DateTimeOffset.ParseExact(match.Groups[1].Value, "MM/dd/yy hh:mm:ss.fff tt", null);
var level = ParseNativeLogLevel(match.Groups[2].Value);
var message = match.Groups[3].Value;
var currentLine = new ParsedLogLine(timestamp, level, message, logFile);
allLogs.Add(currentLine);
}
catch (Exception ex)
{
Logger.Info($"Error parsing line: '{line}. {ex}");
}
}
else
{
Logger.Warn("Incomplete log line: " + line);
}
}
return allLogs;
}
static LogLevel ParseManagedLogLevel(string value)
=> value switch
{
"VRB" => LogLevel.Trace,
"DBG" => LogLevel.Trace,
"INF" => LogLevel.Normal,
"WRN" => LogLevel.Warning,
"ERR" => LogLevel.Error,
_ => LogLevel.Normal, // Concurrency issues can sometimes garble this so ignore it
};
static LogLevel ParseNativeLogLevel(string value)
=> value switch
{
"trace" => LogLevel.Trace,
"debug" => LogLevel.Trace,
"info" => LogLevel.Normal,
"warning" => LogLevel.Warning,
"error" => LogLevel.Error,
_ => LogLevel.Normal, // Concurrency issues can sometimes garble this so ignore it
};
Logger.Info($"Skipping log parsing, directory '{logDirectory}' not found");
});
private AbsolutePath GetResultsDirectory(Project proj) => BuildDataDirectory / "results" / proj.Name;
private void EnsureResultsDirectory(Project proj) => EnsureCleanDirectory(GetResultsDirectory(proj));
private (string, string) GetUnixArchitectureAndExtension() => IsOsx ? ("osx-x64", "dylib") : ($"linux-{LinuxArchitectureIdentifier}", "so");
// the integration tests need their own copy of the profiler; this is achieved through build.props on Windows, but doesn't seem to work under Linux
private void IntegrationTestLinuxProfilerDirFudge(string project)
{
// Not sure if/why this is necessary, and we can't just point to the correct output location
var src = TracerHomeDirectory;
var testProject = Solution.GetProject(project).Directory;
var dest = testProject / "bin" / BuildConfiguration / Framework / "profiler-lib";
CopyDirectoryRecursively(src, dest, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
// not sure exactly where this is supposed to go, may need to change the original build
foreach (var linuxDir in TracerHomeDirectory.GlobDirectories("linux-*"))
{
CopyDirectoryRecursively(linuxDir, dest, DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
}
}
private void MoveLogsToBuildData()
{
if (Directory.Exists(TracerLogDirectory))
{
CopyDirectoryRecursively(TracerLogDirectory, BuildDataDirectory / "logs",
DirectoryExistsPolicy.Merge, FileExistsPolicy.Overwrite);
}
if (Directory.Exists(TempDirectory))
{
foreach (var dump in GlobFiles(TempDirectory, "coredump*"))
{
MoveFileToDirectory(dump, BuildDataDirectory / "dumps", FileExistsPolicy.Overwrite);
}
}
}
private void CopyMemoryDumps()
{
foreach (var file in Directory.EnumerateFiles(TracerDirectory, "*.dmp", SearchOption.AllDirectories))
{
CopyFileToDirectory(file, BuildDataDirectory, FileExistsPolicy.OverwriteIfNewer);
}
}
private DotNetTestSettings ConfigureCodeCoverage(DotNetTestSettings settings)
{
var strongNameKeyPath = Solution.Directory / "Datadog.Trace.snk";
return settings.SetDataCollector("XPlat Code Coverage")
.SetProcessArgumentConfigurator(
args =>
args.Add("--")
.Add("RunConfiguration.DisableAppDomain=true") // https://github.com/coverlet-coverage/coverlet/issues/347
.Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.SkipAutoProps=true")
.Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Format=cobertura")
.Add($"DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.StrongNameKey=\"{strongNameKeyPath}\"")
.Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.ExcludeByFile=\"**/NuGet/**/LibLog/**/*.cs\",")
.Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Exclude=\"[*]Datadog.Trace.Vendors.*,[Datadog.Trace]System.*,[Datadog.Trace]Mono.*\",")
.Add("DataCollectionRunSettings.DataCollectors.DataCollector.Configuration.Include=\"[Datadog.Trace.ClrProfiler.*]*,[Datadog.Trace]*,[Datadog.Trace.AspNet]*\""));
}
protected override void OnTargetStart(string target)
{
if (PrintDriveSpace)
{
foreach (var drive in DriveInfo.GetDrives().Where(d => d.IsReady))
{
Logger.Info($"Drive space available on '{drive.Name}': {PrettyPrint(drive.AvailableFreeSpace)} / {PrettyPrint(drive.TotalSize)}");
}
}
base.OnTargetStart(target);
static string PrettyPrint(long bytes)
{
var power = Math.Min((int)Math.Log(bytes, 1000), 4);
var normalised = bytes / Math.Pow(1000, power);
return power switch
{
4 => $"{normalised:F}TB",
3 => $"{normalised:F}GB",
2 => $"{normalised:F}MB",
1 => $"{normalised:F}KB",
_ => $"{bytes}B",
};
}
}
private record ParsedLogLine(DateTimeOffset Timestamp, LogLevel Level, string Message, AbsolutePath FileName);
}
| 1 | 24,717 | `project` should never be null here, unless we have projects that aren't in the solution? Which we shouldn't have | DataDog-dd-trace-dotnet | .cs |
@@ -1,13 +1,18 @@
#include "extractor/graph_compressor.hpp"
#include "extractor/compressed_edge_container.hpp"
-#include "extractor/restriction_map.hpp"
+#include "extractor/restriction.hpp"
+#include "extractor/restriction_compressor.hpp"
+
#include "util/dynamic_graph.hpp"
#include "util/node_based_graph.hpp"
#include "util/percent.hpp"
#include "util/log.hpp"
+#include <boost/assert.hpp>
+#include <unordered_set>
+
namespace osrm
{
namespace extractor | 1 | #include "extractor/graph_compressor.hpp"
#include "extractor/compressed_edge_container.hpp"
#include "extractor/restriction_map.hpp"
#include "util/dynamic_graph.hpp"
#include "util/node_based_graph.hpp"
#include "util/percent.hpp"
#include "util/log.hpp"
namespace osrm
{
namespace extractor
{
void GraphCompressor::Compress(const std::unordered_set<NodeID> &barrier_nodes,
const std::unordered_set<NodeID> &traffic_lights,
RestrictionMap &restriction_map,
util::NodeBasedDynamicGraph &graph,
CompressedEdgeContainer &geometry_compressor)
{
const unsigned original_number_of_nodes = graph.GetNumberOfNodes();
const unsigned original_number_of_edges = graph.GetNumberOfEdges();
{
util::UnbufferedLog log;
util::Percent progress(log, original_number_of_nodes);
for (const NodeID node_v : util::irange(0u, original_number_of_nodes))
{
progress.PrintStatus(node_v);
// only contract degree 2 vertices
if (2 != graph.GetOutDegree(node_v))
{
continue;
}
// don't contract barrier node
if (barrier_nodes.end() != barrier_nodes.find(node_v))
{
continue;
}
// check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
if (restriction_map.IsViaNode(node_v))
{
continue;
}
// reverse_e2 forward_e2
// u <---------- v -----------> w
// ----------> <-----------
// forward_e1 reverse_e1
//
// Will be compressed to:
//
// reverse_e1
// u <---------- w
// ---------->
// forward_e1
//
// If the edges are compatible.
const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed;
const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) &&
forward_e2 < graph.EndEdges(node_v));
const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) &&
reverse_e2 < graph.EndEdges(node_v));
const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2);
const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2);
const NodeID node_w = graph.GetTarget(forward_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_w);
BOOST_ASSERT(node_v != node_w);
const NodeID node_u = graph.GetTarget(reverse_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_u);
BOOST_ASSERT(node_u != node_v);
const EdgeID forward_e1 = graph.FindEdge(node_u, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
BOOST_ASSERT(node_v == graph.GetTarget(forward_e1));
const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1));
const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1);
const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1);
if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
{
continue;
}
// this case can happen if two ways with different names overlap
if (fwd_edge_data1.name_id != rev_edge_data1.name_id ||
fwd_edge_data2.name_id != rev_edge_data2.name_id)
{
continue;
}
if (fwd_edge_data1.CanCombineWith(fwd_edge_data2) &&
rev_edge_data1.CanCombineWith(rev_edge_data2))
{
BOOST_ASSERT(graph.GetEdgeData(forward_e1).name_id ==
graph.GetEdgeData(reverse_e1).name_id);
BOOST_ASSERT(graph.GetEdgeData(forward_e2).name_id ==
graph.GetEdgeData(reverse_e2).name_id);
/*
* Remember Lane Data for compressed parts. This handles scenarios where lane-data is
* only kept up until a traffic light.
*
* | |
* ---------------- |
* -^ | |
* ----------- |
* -v | |
* --------------- |
* | |
*
* u ------- v ---- w
*
* Since the edge is compressible, we can transfer:
* "left|right" (uv) and "" (uw) into a string with "left|right" (uw) for the
* compressed edge.
* Doing so, we might mess up the point from where the lanes are shown. It should be
* reasonable, since the announcements have to come early anyhow. So there is a
* potential danger in here, but it saves us from adding a lot of additional edges
* for turn-lanes. Without this, we would have to treat any turn-lane beginning/ending
* just like a barrier.
*/
const auto selectLaneID = [](const LaneDescriptionID front,
const LaneDescriptionID back) {
// A lane has tags: u - (front) - v - (back) - w
// During contraction, we keep only one of the tags. Usually the one closer to the
// intersection is preferred. If it's empty, however, we keep the non-empty one.
if (back == INVALID_LANE_DESCRIPTIONID)
return front;
return back;
};
graph.GetEdgeData(forward_e1).lane_description_id = selectLaneID(
fwd_edge_data1.lane_description_id, fwd_edge_data2.lane_description_id);
graph.GetEdgeData(reverse_e1).lane_description_id = selectLaneID(
rev_edge_data1.lane_description_id, rev_edge_data2.lane_description_id);
graph.GetEdgeData(forward_e2).lane_description_id = selectLaneID(
fwd_edge_data2.lane_description_id, fwd_edge_data1.lane_description_id);
graph.GetEdgeData(reverse_e2).lane_description_id = selectLaneID(
rev_edge_data2.lane_description_id, rev_edge_data1.lane_description_id);
// Do not compress edge if it crosses a traffic signal.
// This can't be done in CanCombineWith, because we only store the
// traffic signals in the `traffic_lights` list, which EdgeData
// doesn't have access to.
const bool has_node_penalty = traffic_lights.find(node_v) != traffic_lights.end();
if (has_node_penalty)
continue;
// Get weights before graph is modified
const auto forward_weight1 = fwd_edge_data1.weight;
const auto forward_weight2 = fwd_edge_data2.weight;
const auto forward_duration1 = fwd_edge_data1.duration;
const auto forward_duration2 = fwd_edge_data2.duration;
BOOST_ASSERT(0 != forward_weight1);
BOOST_ASSERT(0 != forward_weight2);
const auto reverse_weight1 = rev_edge_data1.weight;
const auto reverse_weight2 = rev_edge_data2.weight;
const auto reverse_duration1 = rev_edge_data1.duration;
const auto reverse_duration2 = rev_edge_data2.duration;
BOOST_ASSERT(0 != reverse_weight1);
BOOST_ASSERT(0 != reverse_weight2);
// add weight of e2's to e1
graph.GetEdgeData(forward_e1).weight += forward_weight2;
graph.GetEdgeData(reverse_e1).weight += reverse_weight2;
// add duration of e2's to e1
graph.GetEdgeData(forward_e1).duration += forward_duration2;
graph.GetEdgeData(reverse_e1).duration += reverse_duration2;
// extend e1's to targets of e2's
graph.SetTarget(forward_e1, node_w);
graph.SetTarget(reverse_e1, node_u);
// remove e2's (if bidir, otherwise only one)
graph.DeleteEdge(node_v, forward_e2);
graph.DeleteEdge(node_v, reverse_e2);
// update any involved turn restrictions
restriction_map.FixupStartingTurnRestriction(node_u, node_v, node_w);
restriction_map.FixupArrivingTurnRestriction(node_u, node_v, node_w, graph);
restriction_map.FixupStartingTurnRestriction(node_w, node_v, node_u);
restriction_map.FixupArrivingTurnRestriction(node_w, node_v, node_u, graph);
// store compressed geometry in container
geometry_compressor.CompressEdge(forward_e1,
forward_e2,
node_v,
node_w,
forward_weight1,
forward_weight2,
forward_duration1,
forward_duration2);
geometry_compressor.CompressEdge(reverse_e1,
reverse_e2,
node_v,
node_u,
reverse_weight1,
reverse_weight2,
reverse_duration1,
reverse_duration2);
}
}
}
PrintStatistics(original_number_of_nodes, original_number_of_edges, graph);
// Repeat the loop, but now add all edges as uncompressed values.
// The function AddUncompressedEdge does nothing if the edge is already
// in the CompressedEdgeContainer.
for (const NodeID node_u : util::irange(0u, original_number_of_nodes))
{
for (const auto edge_id : util::irange(graph.BeginEdges(node_u), graph.EndEdges(node_u)))
{
const EdgeData &data = graph.GetEdgeData(edge_id);
const NodeID target = graph.GetTarget(edge_id);
geometry_compressor.AddUncompressedEdge(edge_id, target, data.weight, data.duration);
}
}
}
void GraphCompressor::PrintStatistics(unsigned original_number_of_nodes,
unsigned original_number_of_edges,
const util::NodeBasedDynamicGraph &graph) const
{
unsigned new_node_count = 0;
unsigned new_edge_count = 0;
for (const auto i : util::irange(0u, graph.GetNumberOfNodes()))
{
if (graph.GetOutDegree(i) > 0)
{
++new_node_count;
new_edge_count += (graph.EndEdges(i) - graph.BeginEdges(i));
}
}
util::Log() << "Node compression ratio: " << new_node_count / (double)original_number_of_nodes;
util::Log() << "Edge compression ratio: " << new_edge_count / (double)original_number_of_edges;
}
}
}
| 1 | 22,206 | Simple `for` loop instead of `for_each` would help here by making it more readable. | Project-OSRM-osrm-backend | cpp |
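The readability point in the review above — preferring a plain range-based `for` loop over `std::for_each` with a lambda — can be shown with a minimal, self-contained C++ sketch; the container name and element type below are hypothetical and are not taken from the patch under review:

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<int> restrictions = {1, 2, 3};

    // Lambda-based version: correct, but the algorithm call adds indirection for a trivial body
    std::for_each(restrictions.begin(), restrictions.end(), [](const int r) { std::cout << r << '\n'; });

    // Equivalent simple loop: the shape the reviewer asks for, with the body visible inline
    for (const int r : restrictions)
    {
        std::cout << r << '\n';
    }
}

Both loops do the same work; the range-based form drops the iterator pair and the lambda boilerplate, which is the readability gain the reviewer is pointing at.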
@@ -96,6 +96,8 @@ namespace thread_role
case nano::thread_role::name::signature_checking:
thread_role_name_string = "Signature check";
break;
+ case nano::thread_role::name::confirmation_height_processing:
+ thread_role_name_string = "Conf height";
}
/* | 1 | #include <iostream>
#include <nano/lib/utility.hpp>
namespace nano
{
seq_con_info_composite::seq_con_info_composite (const std::string & name) :
name (name)
{
}
bool seq_con_info_composite::is_composite () const
{
return true;
}
void seq_con_info_composite::add_component (std::unique_ptr<seq_con_info_component> child)
{
children.push_back (std::move (child));
}
const std::vector<std::unique_ptr<seq_con_info_component>> & seq_con_info_composite::get_children () const
{
return children;
}
const std::string & seq_con_info_composite::get_name () const
{
return name;
}
seq_con_info_leaf::seq_con_info_leaf (const seq_con_info & info) :
info (info)
{
}
bool seq_con_info_leaf::is_composite () const
{
return false;
}
const seq_con_info & seq_con_info_leaf::get_info () const
{
return info;
}
namespace thread_role
{
/*
* nano::thread_role namespace
*
* Manage thread role
*/
static thread_local nano::thread_role::name current_thread_role = nano::thread_role::name::unknown;
nano::thread_role::name get ()
{
return current_thread_role;
}
static std::string get_string (nano::thread_role::name role)
{
std::string thread_role_name_string;
switch (role)
{
case nano::thread_role::name::unknown:
thread_role_name_string = "<unknown>";
break;
case nano::thread_role::name::io:
thread_role_name_string = "I/O";
break;
case nano::thread_role::name::work:
thread_role_name_string = "Work pool";
break;
case nano::thread_role::name::packet_processing:
thread_role_name_string = "Pkt processing";
break;
case nano::thread_role::name::alarm:
thread_role_name_string = "Alarm";
break;
case nano::thread_role::name::vote_processing:
thread_role_name_string = "Vote processing";
break;
case nano::thread_role::name::block_processing:
thread_role_name_string = "Blck processing";
break;
case nano::thread_role::name::request_loop:
thread_role_name_string = "Request loop";
break;
case nano::thread_role::name::wallet_actions:
thread_role_name_string = "Wallet actions";
break;
case nano::thread_role::name::bootstrap_initiator:
thread_role_name_string = "Bootstrap init";
break;
case nano::thread_role::name::voting:
thread_role_name_string = "Voting";
break;
case nano::thread_role::name::signature_checking:
thread_role_name_string = "Signature check";
break;
}
/*
* We want to constrain the thread names to 15
* characters, since this is the smallest maximum
* length supported by the platforms we support
* (specifically, Linux)
*/
assert (thread_role_name_string.size () < 16);
return (thread_role_name_string);
}
std::string get_string ()
{
return get_string (current_thread_role);
}
void set (nano::thread_role::name role)
{
auto thread_role_name_string (get_string (role));
nano::thread_role::set_os_name (thread_role_name_string);
nano::thread_role::current_thread_role = role;
}
}
}
void nano::thread_attributes::set (boost::thread::attributes & attrs)
{
auto attrs_l (&attrs);
attrs_l->set_stack_size (8000000); //8MB
}
nano::thread_runner::thread_runner (boost::asio::io_context & io_ctx_a, unsigned service_threads_a)
{
boost::thread::attributes attrs;
nano::thread_attributes::set (attrs);
for (auto i (0u); i < service_threads_a; ++i)
{
threads.push_back (boost::thread (attrs, [&io_ctx_a]() {
nano::thread_role::set (nano::thread_role::name::io);
try
{
io_ctx_a.run ();
}
catch (...)
{
#ifndef NDEBUG
/*
* In a release build, catch and swallow the
* io_context exception, in debug mode pass it
* on
*/
throw;
#endif
}
}));
}
}
nano::thread_runner::~thread_runner ()
{
join ();
}
void nano::thread_runner::join ()
{
for (auto & i : threads)
{
if (i.joinable ())
{
i.join ();
}
}
}
/*
* Backing code for "release_assert", which is itself a macro
*/
void release_assert_internal (bool check, const char * check_expr, const char * file, unsigned int line)
{
if (check)
{
return;
}
std::cerr << "Assertion (" << check_expr << ") failed " << file << ":" << line << std::endl;
abort ();
}
| 1 | 15,262 | Looks like this falls through, should break. | nanocurrency-nano-node | cpp |
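A reduced sketch of the fix the reviewer asks for on the patch above: the newly added confirmation_height_processing case should end with an explicit break, matching the other labels in get_string. The enum here is cut down to two values purely for illustration; nano's real nano::thread_role::name has many more members.

#include <string>

enum class name
{
	signature_checking,
	confirmation_height_processing
};

std::string get_string (name role)
{
	std::string thread_role_name_string;
	switch (role)
	{
		case name::signature_checking:
			thread_role_name_string = "Signature check";
			break;
		case name::confirmation_height_processing:
			thread_role_name_string = "Conf height";
			break; // the break the reviewer points out is missing from the patch
	}
	return thread_role_name_string;
}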
@@ -73,6 +73,7 @@ class ManifestListWriter implements FileAppender<ManifestFile> {
.schema(ManifestFile.schema())
.named("manifest_file")
.meta(meta)
+ .overwrite(false)
.build();
} catch (IOException e) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import org.apache.iceberg.avro.Avro;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.io.FileAppender;
import org.apache.iceberg.io.OutputFile;
class ManifestListWriter implements FileAppender<ManifestFile> {
private final FileAppender<ManifestFile> writer;
ManifestListWriter(OutputFile snapshotFile, long snapshotId, Long parentSnapshotId) {
this.writer = newAppender(snapshotFile, ImmutableMap.of(
"snapshot-id", String.valueOf(snapshotId),
"parent-snapshot-id", String.valueOf(parentSnapshotId)));
}
@Override
public void add(ManifestFile file) {
writer.add(file);
}
@Override
public void addAll(Iterator<ManifestFile> values) {
writer.addAll(values);
}
@Override
public void addAll(Iterable<ManifestFile> values) {
writer.addAll(values);
}
@Override
public Metrics metrics() {
return writer.metrics();
}
@Override
public void close() throws IOException {
writer.close();
}
@Override
public long length() {
return writer.length();
}
private static FileAppender<ManifestFile> newAppender(OutputFile file, Map<String, String> meta) {
try {
return Avro.write(file)
.schema(ManifestFile.schema())
.named("manifest_file")
.meta(meta)
.build();
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to create snapshot list writer for path: " + file);
}
}
}
| 1 | 14,727 | Let's default manifest lists and manifests to overwrite. These use UUID-based file names and should never conflict. | apache-iceberg | java |
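A sketch of the alternative the reviewer suggests for newAppender in this file: default the writer to overwrite instead of the patch's overwrite(false), on the grounds that manifest list file names are UUID-based and should never collide. The overwrite(boolean) builder call is taken from the patch itself, and the imports are the ones already present in ManifestListWriter; this is the reviewer's proposal, not the committed code.

  private static FileAppender<ManifestFile> newAppender(OutputFile file, Map<String, String> meta) {
    try {
      return Avro.write(file)
          .schema(ManifestFile.schema())
          .named("manifest_file")
          .meta(meta)
          .overwrite(true)   // reviewer's suggestion: overwrite by default
          .build();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create snapshot list writer for path: " + file);
    }
  }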
@@ -135,6 +135,9 @@ confspec = ConfigObj(StringIO(
audioCoordinates_maxPitch = integer(default=880)
reportMouseShapeChanges = boolean(default=false)
+[speechView]
+ showSpeechViewerAtStartup = boolean(default=false)
+
#Keyboard settings
[keyboard]
useCapsLockAsNVDAModifierKey = boolean(default=false) | 1 | """Manages NVDA configuration.
"""
import globalVars
import _winreg
import ctypes
import ctypes.wintypes
import os
import sys
from cStringIO import StringIO
import itertools
import contextlib
from collections import OrderedDict
from configobj import ConfigObj, ConfigObjError
from validate import Validator
from logHandler import log
import shlobj
import baseObject
import easeOfAccess
import winKernel
def validateConfig(configObj,validator,validationResult=None,keyList=None):
"""
@deprecated: Add-ons which need this should provide their own implementation.
"""
import warnings
warnings.warn("config.validateConfig deprecated. Callers should provide their own implementation.",
DeprecationWarning, 2)
if validationResult is None:
validationResult=configObj.validate(validator,preserve_errors=True)
if validationResult is True:
return None #No errors
if validationResult is False:
return "Badly formed configuration file"
errorStrings=[]
for k,v in validationResult.iteritems():
if v is True:
continue
newKeyList=list(keyList) if keyList is not None else []
newKeyList.append(k)
if isinstance(v,dict):
errorStrings.extend(validateConfig(configObj[k],validator,v,newKeyList))
else:
#If a key is invalid configObj does not record its default, thus we need to get and set the default manually
defaultValue=validator.get_default_value(configObj.configspec[k])
configObj[k]=defaultValue
if k not in configObj.defaults:
configObj.defaults.append(k)
errorStrings.append("%s: %s, defaulting to %s"%(k,v,defaultValue))
return errorStrings
#: @deprecated: Use C{conf.validator} instead.
val = Validator()
#: The configuration specification
#: @type: ConfigObj
confspec = ConfigObj(StringIO(
"""# NVDA Configuration File
[general]
language = string(default="Windows")
saveConfigurationOnExit = boolean(default=True)
askToExit = boolean(default=true)
playStartAndExitSounds = boolean(default=true)
#possible log levels are DEBUG, IO, DEBUGWARNING, INFO
loggingLevel = string(default="INFO")
showWelcomeDialogAtStartup = boolean(default=true)
# Speech settings
[speech]
# The synthesiser to use
synth = string(default=auto)
symbolLevel = integer(default=100)
trustVoiceLanguage = boolean(default=true)
beepSpeechModePitch = integer(default=10000,min=50,max=11025)
outputDevice = string(default=default)
autoLanguageSwitching = boolean(default=true)
autoDialectSwitching = boolean(default=false)
[[__many__]]
capPitchChange = integer(default=30,min=-100,max=100)
sayCapForCapitals = boolean(default=false)
beepForCapitals = boolean(default=false)
useSpellingFunctionality = boolean(default=true)
# Audio settings
[audio]
audioDuckingMode = integer(default=0)
# Braille settings
[braille]
display = string(default=noBraille)
translationTable = string(default=en-us-comp8.ctb)
inputTable = string(default=en-us-comp8.ctb)
expandAtCursor = boolean(default=true)
showCursor = boolean(default=true)
cursorBlinkRate = integer(default=500,min=0,max=2000)
cursorShape = integer(default=192,min=1,max=255)
messageTimeout = integer(default=4,min=0,max=20)
tetherTo = string(default="focus")
readByParagraph = boolean(default=false)
wordWrap = boolean(default=true)
# Braille display driver settings
[[__many__]]
port = string(default="")
# Presentation settings
[presentation]
reportKeyboardShortcuts = boolean(default=true)
reportObjectPositionInformation = boolean(default=true)
guessObjectPositionInformationWhenUnavailable = boolean(default=false)
reportTooltips = boolean(default=false)
reportHelpBalloons = boolean(default=true)
reportObjectDescriptions = boolean(default=True)
reportDynamicContentChanges = boolean(default=True)
[[progressBarUpdates]]
reportBackgroundProgressBars = boolean(default=false)
#output modes are beep, speak, both, or off
progressBarOutputMode = string(default="beep")
speechPercentageInterval = integer(default=10)
beepPercentageInterval = integer(default=1)
beepMinHZ = integer(default=110)
[mouse]
enableMouseTracking = boolean(default=True) #must be true for any of the other settings to work
mouseTextUnit = string(default="paragraph")
reportObjectRoleOnMouseEnter = boolean(default=False)
audioCoordinatesOnMouseMove = boolean(default=False)
audioCoordinates_detectBrightness = boolean(default=False)
audioCoordinates_blurFactor = integer(default=3)
audioCoordinates_minVolume = float(default=0.1)
audioCoordinates_maxVolume = float(default=1.0)
audioCoordinates_minPitch = integer(default=220)
audioCoordinates_maxPitch = integer(default=880)
reportMouseShapeChanges = boolean(default=false)
#Keyboard settings
[keyboard]
useCapsLockAsNVDAModifierKey = boolean(default=false)
useNumpadInsertAsNVDAModifierKey = boolean(default=true)
useExtendedInsertAsNVDAModifierKey = boolean(default=true)
keyboardLayout = string(default="desktop")
speakTypedCharacters = boolean(default=true)
speakTypedWords = boolean(default=false)
beepForLowercaseWithCapslock = boolean(default=true)
speakCommandKeys = boolean(default=false)
speechInterruptForCharacters = boolean(default=true)
speechInterruptForEnter = boolean(default=true)
allowSkimReadingInSayAll = boolean(default=False)
alertForSpellingErrors = boolean(default=True)
handleInjectedKeys= boolean(default=true)
[virtualBuffers]
maxLineLength = integer(default=100)
linesPerPage = integer(default=25)
useScreenLayout = boolean(default=True)
autoPassThroughOnFocusChange = boolean(default=true)
autoPassThroughOnCaretMove = boolean(default=false)
passThroughAudioIndication = boolean(default=true)
autoSayAllOnPageLoad = boolean(default=true)
trapNonCommandGestures = boolean(default=true)
#Settings for document reading (such as MS Word and wordpad)
[documentFormatting]
#These settings affect what information is reported when you navigate to text where the formatting or placement has changed
detectFormatAfterCursor = boolean(default=false)
reportFontName = boolean(default=false)
reportFontSize = boolean(default=false)
reportFontAttributes = boolean(default=false)
reportRevisions = boolean(default=true)
reportEmphasis = boolean(default=false)
reportColor = boolean(default=False)
reportAlignment = boolean(default=false)
reportLineSpacing = boolean(default=false)
reportStyle = boolean(default=false)
reportSpellingErrors = boolean(default=true)
reportPage = boolean(default=true)
reportLineNumber = boolean(default=False)
reportLineIndentation = boolean(default=False)
reportParagraphIndentation = boolean(default=False)
reportTables = boolean(default=true)
includeLayoutTables = boolean(default=False)
reportTableHeaders = boolean(default=True)
reportTableCellCoords = boolean(default=True)
reportLinks = boolean(default=true)
reportComments = boolean(default=true)
reportLists = boolean(default=true)
reportHeadings = boolean(default=true)
reportBlockQuotes = boolean(default=true)
reportLandmarks = boolean(default=true)
reportFrames = boolean(default=true)
reportClickable = boolean(default=true)
[reviewCursor]
simpleReviewMode = boolean(default=True)
followFocus = boolean(default=True)
followCaret = boolean(default=True)
followMouse = boolean(default=False)
[UIA]
minWindowsVersion = float(default=6.1)
enabled = boolean(default=true)
[update]
autoCheck = boolean(default=true)
[inputComposition]
autoReportAllCandidates = boolean(default=True)
announceSelectedCandidate = boolean(default=True)
alwaysIncludeShortCharacterDescriptionInCandidateName = boolean(default=True)
reportReadingStringChanges = boolean(default=True)
reportCompositionStringChanges = boolean(default=True)
[debugLog]
hwIo = boolean(default=false)
audioDucking = boolean(default=false)
[upgrade]
newLaptopKeyboardLayout = boolean(default=false)
"""
), list_values=False, encoding="UTF-8")
confspec.newlines = "\r\n"
#: The active configuration, C{None} if it has not yet been loaded.
#: @type: ConfigObj
conf = None
def initialize():
global conf
conf = ConfigManager()
def save():
"""
@deprecated: Use C{conf.save} instead.
"""
import warnings
warnings.warn("config.save deprecated. Use config.conf.save instead.",
DeprecationWarning, 2)
conf.save()
def saveOnExit():
"""Save the configuration if configured to save on exit.
This should only be called if NVDA is about to exit.
Errors are ignored.
"""
if conf["general"]["saveConfigurationOnExit"]:
try:
conf.save()
except:
pass
def isInstalledCopy():
"""Checks to see if this running copy of NVDA is installed on the system"""
try:
k=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\NVDA")
instDir=_winreg.QueryValueEx(k,"UninstallDirectory")[0]
except WindowsError:
return False
_winreg.CloseKey(k)
try:
return os.stat(instDir)==os.stat(os.getcwdu())
except WindowsError:
return False
def getInstalledUserConfigPath():
try:
return os.path.join(shlobj.SHGetFolderPath(0, shlobj.CSIDL_APPDATA), "nvda")
except WindowsError:
return None
def getUserDefaultConfigPath(useInstalledPathIfExists=False):
"""Get the default path for the user configuration directory.
This is the default path and doesn't reflect overriding from the command line,
which includes temporary copies.
Most callers will want the C{globalVars.appArgs.configPath variable} instead.
"""
installedUserConfigPath=getInstalledUserConfigPath()
if installedUserConfigPath and (isInstalledCopy() or (useInstalledPathIfExists and os.path.isdir(installedUserConfigPath))):
return installedUserConfigPath
return u'.\\userConfig\\'
def getSystemConfigPath():
if isInstalledCopy():
try:
return os.path.join(shlobj.SHGetFolderPath(0, shlobj.CSIDL_COMMON_APPDATA), "nvda")
except WindowsError:
pass
return None
def initConfigPath(configPath=None):
"""
Creates the current configuration path if it doesn't exist. Also makes sure that various sub directories also exist.
@param configPath: an optional path which should be used instead (only useful when being called from outside of NVDA)
@type configPath: basestring
"""
if not configPath:
configPath=globalVars.appArgs.configPath
if not os.path.isdir(configPath):
os.makedirs(configPath)
for subdir in ("addons", "appModules","brailleDisplayDrivers","speechDicts","synthDrivers","globalPlugins","profiles"):
subdir=os.path.join(configPath,subdir)
if not os.path.isdir(subdir):
os.makedirs(subdir)
RUN_REGKEY = ur"SOFTWARE\Microsoft\Windows\CurrentVersion\Run"
def getStartAfterLogon():
if (easeOfAccess.isSupported and easeOfAccess.canConfigTerminateOnDesktopSwitch
and easeOfAccess.willAutoStart(_winreg.HKEY_CURRENT_USER)):
return True
try:
k = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, RUN_REGKEY)
val = _winreg.QueryValueEx(k, u"nvda")[0]
return os.stat(val) == os.stat(sys.argv[0])
except (WindowsError, OSError):
return False
def setStartAfterLogon(enable):
if getStartAfterLogon() == enable:
return
if easeOfAccess.isSupported and easeOfAccess.canConfigTerminateOnDesktopSwitch:
easeOfAccess.setAutoStart(_winreg.HKEY_CURRENT_USER, enable)
if enable:
return
# We're disabling, so ensure the run key is cleared,
# as it might have been set by an old version.
run = False
else:
run = enable
k = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, RUN_REGKEY, 0, _winreg.KEY_WRITE)
if run:
_winreg.SetValueEx(k, u"nvda", None, _winreg.REG_SZ, sys.argv[0])
else:
try:
_winreg.DeleteValue(k, u"nvda")
except WindowsError:
pass
SERVICE_FILENAME = u"nvda_service.exe"
def isServiceInstalled():
if not os.path.isfile(SERVICE_FILENAME):
return False
try:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, ur"SYSTEM\CurrentControlSet\Services\nvda")
val = _winreg.QueryValueEx(k, u"ImagePath")[0].replace(u'"', u'')
return os.stat(val) == os.stat(SERVICE_FILENAME)
except (WindowsError, OSError):
return False
def canStartOnSecureScreens():
return isInstalledCopy() and (easeOfAccess.isSupported or isServiceInstalled())
def execElevated(path, params=None, wait=False,handleAlreadyElevated=False):
import subprocess
import shellapi
import winUser
if params is not None:
params = subprocess.list2cmdline(params)
sei = shellapi.SHELLEXECUTEINFO(lpFile=os.path.abspath(path), lpParameters=params, nShow=winUser.SW_HIDE)
#IsUserAnAdmin is apparently deprecated so may not work above Windows 8
if not handleAlreadyElevated or not ctypes.windll.shell32.IsUserAnAdmin():
sei.lpVerb=u"runas"
if wait:
sei.fMask = shellapi.SEE_MASK_NOCLOSEPROCESS
shellapi.ShellExecuteEx(sei)
if wait:
try:
h=ctypes.wintypes.HANDLE(sei.hProcess)
msg=ctypes.wintypes.MSG()
while ctypes.windll.user32.MsgWaitForMultipleObjects(1,ctypes.byref(h),False,-1,255)==1:
while ctypes.windll.user32.PeekMessageW(ctypes.byref(msg),None,0,0,1):
ctypes.windll.user32.TranslateMessage(ctypes.byref(msg))
ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))
return winKernel.GetExitCodeProcess(sei.hProcess)
finally:
winKernel.closeHandle(sei.hProcess)
SLAVE_FILENAME = u"nvda_slave.exe"
NVDA_REGKEY = ur"SOFTWARE\NVDA"
def getStartOnLogonScreen():
if easeOfAccess.isSupported and easeOfAccess.willAutoStart(_winreg.HKEY_LOCAL_MACHINE):
return True
try:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, NVDA_REGKEY)
return bool(_winreg.QueryValueEx(k, u"startOnLogonScreen")[0])
except WindowsError:
return False
def _setStartOnLogonScreen(enable):
if easeOfAccess.isSupported:
# The installer will have migrated service config to EoA if appropriate,
# so we only need to deal with EoA here.
easeOfAccess.setAutoStart(_winreg.HKEY_LOCAL_MACHINE, enable)
else:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, NVDA_REGKEY, 0, _winreg.KEY_WRITE)
_winreg.SetValueEx(k, u"startOnLogonScreen", None, _winreg.REG_DWORD, int(enable))
def setSystemConfigToCurrentConfig():
fromPath=os.path.abspath(globalVars.appArgs.configPath)
if ctypes.windll.shell32.IsUserAnAdmin():
_setSystemConfig(fromPath)
else:
res=execElevated(SLAVE_FILENAME, (u"setNvdaSystemConfig", fromPath), wait=True)
if res==2:
raise installer.RetriableFailure
elif res!=0:
raise RuntimeError("Slave failure")
def _setSystemConfig(fromPath):
import installer
toPath=os.path.join(sys.prefix.decode('mbcs'),'systemConfig')
if os.path.isdir(toPath):
installer.tryRemoveFile(toPath)
for curSourceDir,subDirs,files in os.walk(fromPath):
if curSourceDir==fromPath:
curDestDir=toPath
else:
curDestDir=os.path.join(toPath,os.path.relpath(curSourceDir,fromPath))
if not os.path.isdir(curDestDir):
os.makedirs(curDestDir)
for f in files:
sourceFilePath=os.path.join(curSourceDir,f)
destFilePath=os.path.join(curDestDir,f)
installer.tryCopyFile(sourceFilePath,destFilePath)
def setStartOnLogonScreen(enable):
if getStartOnLogonScreen() == enable:
return
try:
# Try setting it directly.
_setStartOnLogonScreen(enable)
except WindowsError:
# We probably don't have admin privs, so we need to elevate to do this using the slave.
if execElevated(SLAVE_FILENAME, (u"config_setStartOnLogonScreen", u"%d" % enable), wait=True) != 0:
raise RuntimeError("Slave failed to set startOnLogonScreen")
def getConfigDirs(subpath=None):
"""Retrieve all directories that should be used when searching for configuration.
IF C{subpath} is provided, it will be added to each directory returned.
@param subpath: The path to be added to each directory, C{None} for none.
@type subpath: str
@return: The configuration directories in the order in which they should be searched.
@rtype: list of str
"""
return [os.path.join(dir, subpath) if subpath else dir
for dir in (globalVars.appArgs.configPath,)
]
def addConfigDirsToPythonPackagePath(module, subdir=None):
"""Add the configuration directories to the module search path (__path__) of a Python package.
C{subdir} is added to each configuration directory. It defaults to the name of the Python package.
@param module: The root module of the package.
@type module: module
@param subdir: The subdirectory to be used, C{None} for the name of C{module}.
@type subdir: str
"""
if globalVars.appArgs.disableAddons:
return
if not subdir:
subdir = module.__name__
# Python 2.x doesn't properly handle unicode import paths, so convert them.
dirs = [dir.encode("mbcs") for dir in getConfigDirs(subdir)]
dirs.extend(module.__path__ )
module.__path__ = dirs
# FIXME: this should not be coupled to the config module....
import addonHandler
for addon in addonHandler.getRunningAddons():
addon.addToPackagePath(module)
class ConfigManager(object):
"""Manages and provides access to configuration.
In addition to the base configuration, there can be multiple active configuration profiles.
Settings in more recently activated profiles take precedence,
with the base configuration being consulted last.
This allows a profile to override settings in profiles activated earlier and the base configuration.
A profile need only include a subset of the available settings.
Changed settings are written to the most recently activated profile.
"""
#: Sections that only apply to the base configuration;
#: i.e. they cannot be overridden in profiles.
BASE_ONLY_SECTIONS = {"general", "update", "upgrade"}
def __init__(self):
self.spec = confspec
#: All loaded profiles by name.
self._profileCache = {}
#: The active profiles.
self.profiles = []
#: Whether profile triggers are enabled (read-only).
#: @type: bool
self.profileTriggersEnabled = True
self.validator = val
self.rootSection = None
self._shouldHandleProfileSwitch = True
self._pendingHandleProfileSwitch = False
self._suspendedTriggers = None
self._initBaseConf()
#: Maps triggers to profiles.
self.triggersToProfiles = None
self._loadProfileTriggers()
#: The names of all profiles that have been modified since they were last saved.
self._dirtyProfiles = set()
def _handleProfileSwitch(self):
if not self._shouldHandleProfileSwitch:
self._pendingHandleProfileSwitch = True
return
init = self.rootSection is None
# Reset the cache.
self.rootSection = AggregatedSection(self, (), self.spec, self.profiles)
if init:
# We're still initialising, so don't notify anyone about this change.
return
import synthDriverHandler
synthDriverHandler.handleConfigProfileSwitch()
import braille
braille.handler.handleConfigProfileSwitch()
import audioDucking
audioDucking.handleConfigProfileSwitch()
def _initBaseConf(self, factoryDefaults=False):
fn = os.path.join(globalVars.appArgs.configPath, "nvda.ini")
if factoryDefaults:
profile = ConfigObj(None, indent_type="\t", encoding="UTF-8")
profile.filename = fn
else:
try:
profile = ConfigObj(fn, indent_type="\t", encoding="UTF-8")
self.baseConfigError = False
except:
log.error("Error loading base configuration", exc_info=True)
self.baseConfigError = True
return self._initBaseConf(factoryDefaults=True)
# Python converts \r\n to \n when reading files in Windows, so ConfigObj can't determine the true line ending.
profile.newlines = "\r\n"
for key in self.BASE_ONLY_SECTIONS:
# These sections are returned directly from the base config, so validate them here.
try:
sect = profile[key]
except KeyError:
profile[key] = {}
# ConfigObj mutates this into a configobj.Section.
sect = profile[key]
sect.configspec = self.spec[key]
profile.validate(self.validator, section=sect)
self._profileCache[None] = profile
self.profiles.append(profile)
self._handleProfileSwitch()
def __getitem__(self, key):
if key in self.BASE_ONLY_SECTIONS:
# Return these directly from the base configuration.
return self.profiles[0][key]
return self.rootSection[key]
def __contains__(self, key):
return key in self.rootSection
def get(self, key, default=None):
return self.rootSection.get(key, default)
def __setitem__(self, key, val):
self.rootSection[key] = val
def listProfiles(self):
for name in os.listdir(os.path.join(globalVars.appArgs.configPath, "profiles")):
name, ext = os.path.splitext(name)
if ext == ".ini":
yield name
def _getProfileFn(self, name):
return os.path.join(globalVars.appArgs.configPath, "profiles", name + ".ini")
def _getProfile(self, name, load=True):
try:
return self._profileCache[name]
except KeyError:
if not load:
raise KeyError(name)
# Load the profile.
fn = self._getProfileFn(name)
profile = ConfigObj(fn, indent_type="\t", encoding="UTF-8", file_error=True)
# Python converts \r\n to \n when reading files in Windows, so ConfigObj can't determine the true line ending.
profile.newlines = "\r\n"
profile.name = name
profile.manual = False
profile.triggered = False
self._profileCache[name] = profile
return profile
def getProfile(self, name):
"""Get a profile given its name.
This is useful for checking whether a profile has been manually activated or triggered.
@param name: The name of the profile.
@type name: basestring
@return: The profile object.
@raise KeyError: If the profile is not loaded.
"""
return self._getProfile(name, load=False)
def manualActivateProfile(self, name):
"""Manually activate a profile.
Only one profile can be manually active at a time.
If another profile was manually activated, deactivate it first.
If C{name} is C{None}, a profile will not be activated.
@param name: The name of the profile or C{None} for no profile.
@type name: basestring
"""
if len(self.profiles) > 1:
profile = self.profiles[-1]
if profile.manual:
del self.profiles[-1]
profile.manual = False
if name:
profile = self._getProfile(name)
profile.manual = True
self.profiles.append(profile)
self._handleProfileSwitch()
def _markWriteProfileDirty(self):
if len(self.profiles) == 1:
# There's nothing other than the base config, which is always saved anyway.
return
self._dirtyProfiles.add(self.profiles[-1].name)
def save(self):
"""Save all modified profiles and the base configuration to disk.
"""
if globalVars.appArgs.secure:
# Never save the config if running securely.
return
try:
self.profiles[0].write()
log.info("Base configuration saved")
for name in self._dirtyProfiles:
self._profileCache[name].write()
log.info("Saved configuration profile %s" % name)
self._dirtyProfiles.clear()
except Exception as e:
log.warning("Error saving configuration; probably read only file system")
log.debugWarning("", exc_info=True)
raise e
def reset(self, factoryDefaults=False):
"""Reset the configuration to saved settings or factory defaults.
@param factoryDefaults: C{True} to reset to factory defaults, C{False} to reset to saved configuration.
@type factoryDefaults: bool
"""
self.profiles = []
self._profileCache.clear()
# Signal that we're initialising.
self.rootSection = None
self._initBaseConf(factoryDefaults=factoryDefaults)
def createProfile(self, name):
"""Create a profile.
		@param name: The name of the profile to create.
@type name: basestring
@raise ValueError: If a profile with this name already exists.
"""
if globalVars.appArgs.secure:
return
fn = self._getProfileFn(name)
if os.path.isfile(fn):
raise ValueError("A profile with the same name already exists: %s" % name)
# Just create an empty file to make sure we can.
file(fn, "w")
def deleteProfile(self, name):
"""Delete a profile.
@param name: The name of the profile to delete.
@type name: basestring
@raise LookupError: If the profile doesn't exist.
"""
if globalVars.appArgs.secure:
return
fn = self._getProfileFn(name)
if not os.path.isfile(fn):
raise LookupError("No such profile: %s" % name)
os.remove(fn)
try:
del self._profileCache[name]
except KeyError:
pass
# Remove any triggers associated with this profile.
allTriggers = self.triggersToProfiles
# You can't delete from a dict while iterating through it.
delTrigs = [trigSpec for trigSpec, trigProfile in allTriggers.iteritems()
if trigProfile == name]
if delTrigs:
for trigSpec in delTrigs:
del allTriggers[trigSpec]
self.saveProfileTriggers()
# Check if this profile was active.
delProfile = None
for index in xrange(len(self.profiles) - 1, -1, -1):
profile = self.profiles[index]
if profile.name == name:
# Deactivate it.
del self.profiles[index]
delProfile = profile
if not delProfile:
return
self._handleProfileSwitch()
if self._suspendedTriggers:
# Remove any suspended triggers referring to this profile.
for trigger in self._suspendedTriggers.keys():
if trigger._profile == delProfile:
del self._suspendedTriggers[trigger]
def renameProfile(self, oldName, newName):
"""Rename a profile.
@param oldName: The current name of the profile.
@type oldName: basestring
@param newName: The new name for the profile.
@type newName: basestring
@raise LookupError: If the profile doesn't exist.
@raise ValueError: If a profile with the new name already exists.
"""
if globalVars.appArgs.secure:
return
if newName == oldName:
return
oldFn = self._getProfileFn(oldName)
newFn = self._getProfileFn(newName)
if not os.path.isfile(oldFn):
raise LookupError("No such profile: %s" % oldName)
# Windows file names are case insensitive,
# so only test for file existence if the names don't match case insensitively.
if oldName.lower() != newName.lower() and os.path.isfile(newFn):
raise ValueError("A profile with the same name already exists: %s" % newName)
os.rename(oldFn, newFn)
# Update any associated triggers.
allTriggers = self.triggersToProfiles
saveTrigs = False
for trigSpec, trigProfile in allTriggers.iteritems():
if trigProfile == oldName:
allTriggers[trigSpec] = newName
saveTrigs = True
if saveTrigs:
self.saveProfileTriggers()
try:
profile = self._profileCache.pop(oldName)
except KeyError:
# The profile hasn't been loaded, so there's nothing more to do.
return
profile.name = newName
self._profileCache[newName] = profile
try:
self._dirtyProfiles.remove(oldName)
except KeyError:
# The profile wasn't dirty.
return
self._dirtyProfiles.add(newName)
def _triggerProfileEnter(self, trigger):
"""Called by L{ProfileTrigger.enter}}}.
"""
if not self.profileTriggersEnabled:
return
if self._suspendedTriggers is not None:
self._suspendedTriggers[trigger] = "enter"
return
try:
profile = trigger._profile = self._getProfile(trigger.profileName)
except:
trigger._profile = None
raise
profile.triggered = True
if len(self.profiles) > 1 and self.profiles[-1].manual:
# There's a manually activated profile.
# Manually activated profiles must be at the top of the stack, so insert this one below.
self.profiles.insert(-1, profile)
else:
self.profiles.append(profile)
self._handleProfileSwitch()
def _triggerProfileExit(self, trigger):
"""Called by L{ProfileTrigger.exit}}}.
"""
if not self.profileTriggersEnabled:
return
if self._suspendedTriggers is not None:
if trigger in self._suspendedTriggers:
# This trigger was entered and is now being exited.
# These cancel each other out.
del self._suspendedTriggers[trigger]
else:
self._suspendedTriggers[trigger] = "exit"
return
profile = trigger._profile
if profile is None:
return
profile.triggered = False
try:
self.profiles.remove(profile)
except ValueError:
# This is probably due to the user resetting the configuration.
log.debugWarning("Profile not active when exiting trigger")
return
self._handleProfileSwitch()
@contextlib.contextmanager
def atomicProfileSwitch(self):
"""Indicate that multiple profile switches should be treated as one.
This is useful when multiple triggers may be exited/entered at once;
e.g. when switching applications.
While multiple switches aren't harmful, they might take longer;
e.g. unnecessarily switching speech synthesizers or braille displays.
This is a context manager to be used with the C{with} statement.
"""
self._shouldHandleProfileSwitch = False
try:
yield
finally:
self._shouldHandleProfileSwitch = True
if self._pendingHandleProfileSwitch:
self._handleProfileSwitch()
self._pendingHandleProfileSwitch = False
def suspendProfileTriggers(self):
"""Suspend handling of profile triggers.
Any triggers that currently apply will continue to apply.
Subsequent enters or exits will not apply until triggers are resumed.
@see: L{resumeTriggers}
"""
if self._suspendedTriggers is not None:
return
self._suspendedTriggers = OrderedDict()
def resumeProfileTriggers(self):
"""Resume handling of profile triggers after previous suspension.
Any trigger enters or exits that occurred while triggers were suspended will be applied.
Trigger handling will then return to normal.
@see: L{suspendTriggers}
"""
if self._suspendedTriggers is None:
return
triggers = self._suspendedTriggers
self._suspendedTriggers = None
with self.atomicProfileSwitch():
for trigger, action in triggers.iteritems():
trigger.enter() if action == "enter" else trigger.exit()
def disableProfileTriggers(self):
"""Temporarily disable all profile triggers.
Any triggered profiles will be deactivated and subsequent triggers will not apply.
Call L{enableTriggers} to re-enable triggers.
"""
if not self.profileTriggersEnabled:
return
self.profileTriggersEnabled = False
for profile in self.profiles[1:]:
profile.triggered = False
if len(self.profiles) > 1 and self.profiles[-1].manual:
del self.profiles[1:-1]
else:
del self.profiles[1:]
self._suspendedTriggers = None
self._handleProfileSwitch()
def enableProfileTriggers(self):
"""Re-enable profile triggers after they were previously disabled.
"""
self.profileTriggersEnabled = True
def _loadProfileTriggers(self):
fn = os.path.join(globalVars.appArgs.configPath, "profileTriggers.ini")
try:
cobj = ConfigObj(fn, indent_type="\t", encoding="UTF-8")
except:
log.error("Error loading profile triggers", exc_info=True)
cobj = ConfigObj(None, indent_type="\t", encoding="UTF-8")
cobj.filename = fn
# Python converts \r\n to \n when reading files in Windows, so ConfigObj can't determine the true line ending.
cobj.newlines = "\r\n"
try:
self.triggersToProfiles = cobj["triggersToProfiles"]
except KeyError:
cobj["triggersToProfiles"] = {}
# ConfigObj will have mutated this into a configobj.Section.
self.triggersToProfiles = cobj["triggersToProfiles"]
def saveProfileTriggers(self):
"""Save profile trigger information to disk.
This should be called whenever L{profilesToTriggers} is modified.
"""
if globalVars.appArgs.secure:
# Never save if running securely.
return
self.triggersToProfiles.parent.write()
log.info("Profile triggers saved")
class AggregatedSection(object):
"""A view of a section of configuration which aggregates settings from all active profiles.
"""
def __init__(self, manager, path, spec, profiles):
self.manager = manager
self.path = path
self._spec = spec
#: The relevant section in all of the profiles.
self.profiles = profiles
self._cache = {}
def __getitem__(self, key):
# Try the cache first.
try:
val = self._cache[key]
except KeyError:
pass
else:
if val is KeyError:
# We know there's no such setting.
raise KeyError(key)
return val
spec = self._spec.get(key)
foundSection = False
if isinstance(spec, dict):
foundSection = True
# Walk through the profiles looking for the key.
# If it's a section, collect that section from all profiles.
subProfiles = []
for profile in reversed(self.profiles):
try:
val = profile[key]
except (KeyError, TypeError):
# Indicate that this key doesn't exist in this profile.
subProfiles.append(None)
continue
if isinstance(val, dict):
foundSection = True
subProfiles.append(val)
else:
# This is a setting.
return self._cacheLeaf(key, spec, val)
subProfiles.reverse()
if not foundSection and spec:
# This might have a default.
try:
val = self.manager.validator.get_default_value(spec)
except KeyError:
pass
else:
self._cache[key] = val
return val
if not foundSection:
# The key doesn't exist, so cache this fact.
self._cache[key] = KeyError
raise KeyError(key)
if spec is None:
# Create this section in the config spec.
self._spec[key] = {}
# ConfigObj might have mutated this into a configobj.Section.
spec = self._spec[key]
sect = self._cache[key] = AggregatedSection(self.manager, self.path + (key,), spec, subProfiles)
return sect
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def isSet(self, key):
"""Check whether a given key has been explicitly set.
This is sometimes useful because it can return C{False} even if there is a default for the key.
@return: C{True} if the key has been explicitly set, C{False} if not.
@rtype: bool
"""
for profile in self.profiles:
if not profile:
continue
if key in profile:
return True
return False
def _cacheLeaf(self, key, spec, val):
if spec:
# Validate and convert the value.
val = self.manager.validator.check(spec, val)
self._cache[key] = val
return val
def iteritems(self):
keys = set()
# Start with the cached items.
for key, val in self._cache.iteritems():
keys.add(key)
if val is not KeyError:
yield key, val
# Walk through the profiles and spec looking for items not yet cached.
for profile in itertools.chain(reversed(self.profiles), (self._spec,)):
if not profile:
continue
for key in profile:
if key in keys:
continue
keys.add(key)
# Use __getitem__ so caching, AggregatedSections, etc. are handled.
try:
yield key, self[key]
except KeyError:
# This could happen if the item is in the spec but there's no default.
pass
def copy(self):
return dict(self.iteritems())
def __setitem__(self, key, val):
spec = self._spec.get(key) if self.spec else None
if isinstance(spec, dict) and not isinstance(val, dict):
raise ValueError("Value must be a section")
if isinstance(spec, dict) or isinstance(val, dict):
# The value is a section.
# Update the profile.
updateSect = self._getUpdateSection()
updateSect[key] = val
self.manager._markWriteProfileDirty()
# ConfigObj will have mutated this into a configobj.Section.
val = updateSect[key]
cache = self._cache.get(key)
if cache and cache is not KeyError:
# An AggregatedSection has already been cached, so update it.
cache = self._cache[key]
cache.profiles[-1] = val
cache._cache.clear()
elif cache is KeyError:
# This key now exists, so remove the cached non-existence.
del self._cache[key]
# If an AggregatedSection isn't already cached,
# An appropriate AggregatedSection will be created the next time this section is fetched.
return
if spec:
# Validate and convert the value.
val = self.manager.validator.check(spec, val)
try:
curVal = self[key]
except KeyError:
pass
else:
if val == curVal:
# The value isn't different, so there's nothing to do.
return
# Set this value in the most recently activated profile.
self._getUpdateSection()[key] = val
self.manager._markWriteProfileDirty()
self._cache[key] = val
def _getUpdateSection(self):
profile = self.profiles[-1]
if profile is not None:
# This section already exists in the profile.
return profile
section = self.manager.rootSection
profile = section.profiles[-1]
for part in self.path:
parentProfile = profile
section = section[part]
profile = section.profiles[-1]
if profile is None:
# This section doesn't exist in the profile yet.
# Create it and update the AggregatedSection.
parentProfile[part] = {}
# ConfigObj might have mutated this into a configobj.Section.
profile = section.profiles[-1] = parentProfile[part]
return profile
@property
def spec(self):
return self._spec
@spec.setter
def spec(self, val):
# This section is being replaced.
# Clear it and replace the content so it remains linked to the main spec.
self._spec.clear()
self._spec.update(val)
class ProfileTrigger(object):
"""A trigger for automatic activation/deactivation of a configuration profile.
The user can associate a profile with a trigger.
When the trigger applies, the associated profile is activated.
When the trigger no longer applies, the profile is deactivated.
L{spec} is a string used to search for this trigger and must be implemented.
To signal that this trigger applies, call L{enter}.
To signal that it no longer applies, call L{exit}.
Alternatively, you can use this object as a context manager via the with statement;
i.e. this trigger will apply only inside the with block.
"""
@baseObject.Getter
def spec(self):
"""The trigger specification.
This is a string used to search for this trigger in the user's configuration.
@rtype: basestring
"""
raise NotImplementedError
def enter(self):
"""Signal that this trigger applies.
The associated profile (if any) will be activated.
"""
try:
self.profileName = conf.triggersToProfiles[self.spec]
except KeyError:
self.profileName = None
return
try:
conf._triggerProfileEnter(self)
except:
log.error("Error entering trigger %s, profile %s"
% (self.spec, self.profileName), exc_info=True)
__enter__ = enter
def exit(self):
"""Signal that this trigger no longer applies.
The associated profile (if any) will be deactivated.
"""
if not self.profileName:
return
try:
conf._triggerProfileExit(self)
except:
log.error("Error exiting trigger %s, profile %s"
% (self.spec, self.profileName), exc_info=True)
def __exit__(self, excType, excVal, traceback):
self.exit()
TokenUIAccess = 26
def hasUiAccess():
token = ctypes.wintypes.HANDLE()
ctypes.windll.advapi32.OpenProcessToken(ctypes.windll.kernel32.GetCurrentProcess(),
winKernel.MAXIMUM_ALLOWED, ctypes.byref(token))
try:
val = ctypes.wintypes.DWORD()
ctypes.windll.advapi32.GetTokenInformation(token, TokenUIAccess,
ctypes.byref(val), ctypes.sizeof(ctypes.wintypes.DWORD),
ctypes.byref(ctypes.wintypes.DWORD()))
return bool(val.value)
finally:
ctypes.windll.kernel32.CloseHandle(token)
| 1 | 18,205 | Please rename [speechView] to [speechViewer]. | nvaccess-nvda | py |
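The rename the reviewer asks for, sketched against the patch above: only the section key in the confspec string changes, the option and its default stay the same.

[speechViewer]
	showSpeechViewerAtStartup = boolean(default=false)

Following the conf["section"]["option"] access pattern used elsewhere in this module (e.g. conf["general"]["saveConfigurationOnExit"]), callers would then read the setting as conf["speechViewer"]["showSpeechViewerAtStartup"].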
@@ -20,6 +20,12 @@ type CpuUsage struct {
// Total CPU time consumed per core.
// Units: nanoseconds.
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+ // CPU time consumed per core in kernel mode
+ // Units: nanoseconds.
+ PercpuUsageInKernelmode []uint64 `json:"percpu_usage_in_kernelmode"`
+ // CPU time consumed per core in user mode
+ // Units: nanoseconds.
+ PercpuUsageInUsermode []uint64 `json:"percpu_usage_in_usermode"`
// Time spent by tasks of the cgroup in kernel mode.
// Units: nanoseconds.
UsageInKernelmode uint64 `json:"usage_in_kernelmode"` | 1 | // +build linux
package cgroups
type ThrottlingData struct {
// Number of periods with throttling active
Periods uint64 `json:"periods,omitempty"`
// Number of periods when the container hit its throttling limit.
ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
// Aggregate time the container was throttled for in nanoseconds.
ThrottledTime uint64 `json:"throttled_time,omitempty"`
}
// CpuUsage denotes the usage of a CPU.
// All CPU stats are aggregate since container inception.
type CpuUsage struct {
// Total CPU time consumed.
// Units: nanoseconds.
TotalUsage uint64 `json:"total_usage,omitempty"`
// Total CPU time consumed per core.
// Units: nanoseconds.
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
// Time spent by tasks of the cgroup in kernel mode.
// Units: nanoseconds.
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
// Time spent by tasks of the cgroup in user mode.
// Units: nanoseconds.
UsageInUsermode uint64 `json:"usage_in_usermode"`
}
type CpuStats struct {
CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}
type MemoryData struct {
Usage uint64 `json:"usage,omitempty"`
MaxUsage uint64 `json:"max_usage,omitempty"`
Failcnt uint64 `json:"failcnt"`
Limit uint64 `json:"limit"`
}
type MemoryStats struct {
// memory used for cache
Cache uint64 `json:"cache,omitempty"`
// usage of memory
Usage MemoryData `json:"usage,omitempty"`
// usage of memory + swap
SwapUsage MemoryData `json:"swap_usage,omitempty"`
// usage of kernel memory
KernelUsage MemoryData `json:"kernel_usage,omitempty"`
// usage of kernel TCP memory
KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
// usage of memory pages by NUMA node
// see chapter 5.6 of memory controller documentation
PageUsageByNUMA PageUsageByNUMA `json:"page_usage_by_numa,omitempty"`
// if true, memory usage is accounted for throughout a hierarchy of cgroups.
UseHierarchy bool `json:"use_hierarchy"`
Stats map[string]uint64 `json:"stats,omitempty"`
}
type PageUsageByNUMA struct {
// Embedding is used as types can't be recursive.
PageUsageByNUMAInner
Hierarchical PageUsageByNUMAInner `json:"hierarchical,omitempty"`
}
type PageUsageByNUMAInner struct {
Total PageStats `json:"total,omitempty"`
File PageStats `json:"file,omitempty"`
Anon PageStats `json:"anon,omitempty"`
Unevictable PageStats `json:"unevictable,omitempty"`
}
type PageStats struct {
Total uint64 `json:"total,omitempty"`
Nodes map[uint8]uint64 `json:"nodes,omitempty"`
}
type PidsStats struct {
// number of pids in the cgroup
Current uint64 `json:"current,omitempty"`
// active pids hard limit
Limit uint64 `json:"limit,omitempty"`
}
type BlkioStatEntry struct {
Major uint64 `json:"major,omitempty"`
Minor uint64 `json:"minor,omitempty"`
Op string `json:"op,omitempty"`
Value uint64 `json:"value,omitempty"`
}
type BlkioStats struct {
	// number of bytes transferred to and from the block device
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"`
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"`
}
type HugetlbStats struct {
// current res_counter usage for hugetlb
Usage uint64 `json:"usage,omitempty"`
// maximum usage ever recorded.
MaxUsage uint64 `json:"max_usage,omitempty"`
// number of times hugetlb usage allocation failure.
Failcnt uint64 `json:"failcnt"`
}
type Stats struct {
CpuStats CpuStats `json:"cpu_stats,omitempty"`
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
PidsStats PidsStats `json:"pids_stats,omitempty"`
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
// the map is in the format "size of hugepage: stats of the hugepage"
HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
}
func NewStats() *Stats {
memoryStats := MemoryStats{Stats: make(map[string]uint64)}
hugetlbStats := make(map[string]HugetlbStats)
return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
}
| 1 | 18,787 | I would use shorter yet still descriptive names, e.g. `KernelPerCpu` and `UserPerCpu`. | opencontainers-runc | go |
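A sketch of the naming the reviewer suggests for the two fields added by the patch above, shown as an excerpt of CpuUsage: only the Go identifiers change, the JSON tags are kept exactly as in the patch, and the existing fields are untouched.

// Excerpt: the two new per-core fields with the shorter names the reviewer proposes.
type CpuUsage struct {
	// CPU time consumed per core in kernel mode.
	// Units: nanoseconds.
	KernelPerCpu []uint64 `json:"percpu_usage_in_kernelmode"`
	// CPU time consumed per core in user mode.
	// Units: nanoseconds.
	UserPerCpu []uint64 `json:"percpu_usage_in_usermode"`
	// ... remaining fields unchanged ...
}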
@@ -5017,7 +5017,7 @@ def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.de
display_claim_this_paper = False
can_edit_record = False
- if check_user_can_edit_record(user_info, recID):
+ if not (format.lower().startswith('t')) and check_user_can_edit_record(user_info, recID):
can_edit_record = True
out = "" | 1 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301,W0703
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
# import general modules:
import cgi
import cStringIO
import copy
import os
import re
import time
import string
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except ImportError:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
# import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_SCOAP3_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_BIBFORMAT_HIDDEN_RECJSON_FIELDS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_BIBSORT_ENABLED, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BASE_URL, \
CFG_WEBSEARCH_MAX_RECORDS_REFERSTO, \
CFG_WEBSEARCH_MAX_RECORDS_CITEDBY
from invenio.search_engine_config import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError, \
InvenioWebSearchReferstoLimitError, \
InvenioWebSearchCitedbyLimitError, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_WEBSEARCH_BLACKLISTED_FORMATS
from invenio.search_engine_utils import (get_fieldvalues,
get_fieldvalues_alephseq_like,
record_exists)
from invenio.bibrecord import create_record, record_xml_output
from invenio.bibrank_record_sorter import (get_bibrank_methods,
is_method_valid,
rank_records as rank_records_bibrank,
rank_by_citations)
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.bibindex_tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.bibindex_engine_utils import author_name_requires_phrase_search, \
get_field_tags
from invenio.bibindex_engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.bibindex_engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.bibindex_engine_utils import get_idx_indexer
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from invenio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
from invenio import bibrecord
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, \
get_refersto_hitset, get_citedby_hitset, get_cited_by_list, \
get_refers_to_list, get_citers_log
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.bibrank_selfcites_searcher import get_self_cited_by_list, \
get_self_cited_by, \
get_self_refers_to_list
from invenio.dbquery import run_sql, \
run_sql_with_limit, \
wash_table_column_name, \
get_table_update_time
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils_bibindex_searcher import solr_get_bitset
from invenio.xapianutils_bibindex_searcher import xapian_get_bitset
from invenio.websearch_services import \
get_search_services, \
CFG_WEBSEARCH_SERVICE_MAX_SERVICE_ANSWER_RELEVANCE, \
CFG_WEBSEARCH_SERVICE_MAX_NB_SERVICE_DISPLAY, \
CFG_WEBSEARCH_SERVICE_MIN_RELEVANCE_TO_DISPLAY, \
CFG_WEBSEARCH_SERVICE_MAX_RELEVANCE_DIFFERENCE
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.bibauthorid_config import LIMIT_TO_COLLECTIONS as BIBAUTHORID_LIMIT_TO_COLLECTIONS
websearch_templates = invenio.template.load('websearch')
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
# global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
# precompile some often-used regexp for speed reasons:
re_word = re.compile(r'[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r"\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r"\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
# em possible values
EM_REPOSITORY={"body" : "B",
"header" : "H",
"footer" : "F",
"search_box" : "S",
"see_also_box" : "L",
"basket" : "K",
"alert" : "A",
"search_info" : "I",
"overview" : "O",
"all_portalboxes" : "P",
"te_portalbox" : "Pte",
"tp_portalbox" : "Ptp",
"np_portalbox" : "Pnp",
"ne_portalbox" : "Pne",
"lt_portalbox" : "Plt",
"rt_portalbox" : "Prt",
"search_services": "SER"};
class RestrictedCollectionDataCacher(DataCacher):
def __init__(self):
def cache_filler():
ret = []
res = run_sql("""SELECT DISTINCT ar.value
FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
for coll in res:
ret.append(coll[0])
return ret
def timestamp_verifier():
return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except NameError:
restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]
FIXME: This is handy to have, and should live somewhere else, like
miscutil.really_useful_functions or something.
XXX: Starting in python 2.6, the same can be achieved (faster) by
using itertools.izip_longest(); when the minimum recommended Python
is bumped, we should use that instead.
"""
def l(*items):
return list(items)
return map(l, *lists)
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
"""Return a list of collection that are restricted but for which the user
is authorized."""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
ret = []
for collection in restricted_collection_cache.cache:
if acc_authorize_action(user_info, 'viewrestrcoll', collection=collection)[0] == 0:
ret.append(collection)
return ret
def get_all_restricted_recids():
"""
Return the set of all the restricted recids, i.e. the ids of those records
which belong to at least one restricted collection.
"""
ret = intbitset()
for collection in restricted_collection_cache.cache:
ret |= get_collection_reclist(collection)
return ret
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
"""
Return the list of restricted collection names to which recid belongs.
"""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
collection_reclist_cache.recreate_cache_if_needed()
return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
"""
    Check if the user is the owner of the record, i.e. the submitter
    and/or a member of an owner-like group authorized to 'see' the record.
    @param user_info: the user_info dictionary that describes the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'owner' of the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
if CFG_CERN_SITE:
            # the e-group might be given as an e-mail address ending in @cern.ch
if email_or_group.replace('@cern.ch', ' [CERN]') in user_info['group']:
return True
return False
###FIXME: This method needs to be refactored
def is_user_viewer_of_record(user_info, recid):
"""
    Check if the user is allowed to view the record based on the MARC tags
    inside CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS,
    i.e. their e-mail is inside the 506__m tag or they are in an e-group listed
    in the 506__m tag.
    @param user_info: the user_info dictionary that describes the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
    @return: True if the user is allowed to view the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
def check_user_can_view_record(user_info, recid):
"""
Check if the user is authorized to view the given recid. The function
    grants access in two cases: either the user has author rights on this
    record, or they have view rights to the primary collection this record
    belongs to.
    @param user_info: the user_info dictionary that describes the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: (0, ''), when authorization is granted, (>0, 'message') when
authorization is not granted
@rtype: (int, string)
"""
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if isinstance(recid, str):
recid = int(recid)
## At this point, either webcoll has not yet run or there are some
    ## restricted collections. Let's first see if the user owns the record.
if is_user_owner_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
if is_user_viewer_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
if not restricted_collections and record_public_p(recid):
## The record is public and not part of any restricted collection
return (0, '')
if restricted_collections:
## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
auth_code, auth_msg = 0, ''
for collection in restricted_collections:
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
if auth_code and policy != 'ANY':
## Ouch! the user is not authorized to this collection
return (auth_code, auth_msg)
elif auth_code == 0 and policy == 'ANY':
## Good! At least one collection is authorized
return (0, '')
## Depending on the policy, the user will be either authorized or not
return auth_code, auth_msg
if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
## the record is not in any restricted collection
return (0, '')
elif record_exists(recid) > 0:
## We are in the case where webcoll has not run.
## Let's authorize SUPERADMIN
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
if auth_code == 0:
return (0, '')
else:
## Too bad. Let's print a nice message:
return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
until the assignment is fully completed. Please come back later to
access this record.""")
else:
        ## The record either does not exist or has been deleted.
## Let's handle these situations outside of this code.
return (0, '')
class IndexStemmingDataCacher(DataCacher):
"""
Provides cache for stemming information for word/phrase indexes.
This class is not to be used directly; use function
get_index_stemming_language() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
index_stemming_cache.is_ok_p
except Exception:
index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
"""Return stemming langugage for given index."""
if recreate_cache_if_needed:
index_stemming_cache.recreate_cache_if_needed()
return index_stemming_cache.cache[index_id]
class FieldTokenizerDataCacher(DataCacher):
"""
Provides cache for tokenizer information for fields corresponding to indexes.
This class is not to be used directly; use function
get_field_tokenizer_type() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT fld.code, ind.tokenizer FROM idxINDEX AS ind, field AS fld, idxINDEX_field AS indfld WHERE ind.id = indfld.id_idxINDEX AND indfld.id_field = fld.id""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
field_tokenizer_cache.is_ok_p
except Exception:
field_tokenizer_cache = FieldTokenizerDataCacher()
def get_field_tokenizer_type(field_name, recreate_cache_if_needed=True):
"""Return tokenizer type for given field corresponding to an index if applicable."""
if recreate_cache_if_needed:
field_tokenizer_cache.recreate_cache_if_needed()
tokenizer = None
try:
tokenizer = field_tokenizer_cache.cache[field_name]
except KeyError:
return None
return tokenizer
class CollectionRecListDataCacher(DataCacher):
"""
Provides cache for collection reclist hitsets. This class is not
to be used directly; use function get_collection_reclist() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
res = run_sql("SELECT name FROM collection")
for name in res:
ret[name[0]] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
return ret
def timestamp_verifier():
return get_table_update_time('collection')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_reclist_cache.is_ok_p:
raise Exception
except Exception:
collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
"""Return hitset of recIDs that belong to the collection 'coll'."""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
if coll not in collection_reclist_cache.cache:
return intbitset() # collection does not exist; return empty set
if not collection_reclist_cache.cache[coll]:
# collection's reclist not in the cache yet, so calculate it
# and fill the cache:
reclist = intbitset()
query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
res = run_sql(query, (coll, ), 1)
try:
reclist = intbitset(res[0][1])
except (IndexError, TypeError):
pass
collection_reclist_cache.cache[coll] = reclist
# finally, return reclist:
return collection_reclist_cache.cache[coll]
def get_available_output_formats(visible_only=False):
"""
Return the list of available output formats. When visible_only is
True, returns only those output formats that have visibility flag
set to 1.
"""
formats = []
query = "SELECT code,name FROM format"
if visible_only:
query += " WHERE visibility='1'"
query += " ORDER BY name ASC"
res = run_sql(query)
if res:
# propose found formats:
for code, name in res:
formats.append({'value': code,
'text': name
})
else:
formats.append({'value': 'hb',
'text': "HTML brief"
})
return formats
class SearchResultsCache(DataCacher):
"""
Provides temporary lazy cache for Search Results.
Useful when users click on `next page'.
"""
def __init__(self):
def cache_filler():
return {}
def timestamp_verifier():
return '1970-01-01 00:00:00' # lazy cache is always okay;
# its filling is governed by
# CFG_WEBSEARCH_SEARCH_CACHE_SIZE
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not search_results_cache.is_ok_p:
raise Exception
except Exception:
search_results_cache = SearchResultsCache()
class CollectionI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N collection names. This class is not to be
used directly; use function get_coll_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
except Exception:
# database problems
return {}
for c, ln, i18nname in res:
if i18nname:
if c not in ret:
ret[c] = {}
ret[c][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('collectionname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_i18nname_cache.is_ok_p:
raise Exception
except Exception:
collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted collection name (of the name type `ln'
(=long name)) for collection C in language LN.
This function uses collection_i18nname_cache, but it verifies
whether the cache is up-to-date first by default. This
verification step is performed by checking the DB table update
time. So, if you call this function 1000 times, it can get very
slow because it will do 1000 table update time verifications, even
    though collection names do not change that often.
Hence the parameter VERIFY_CACHE_TIMESTAMP which, when set to
False, will assume the cache is already up-to-date. This is
useful namely in the generation of collection lists for the search
results page.
"""
if verify_cache_timestamp:
collection_i18nname_cache.recreate_cache_if_needed()
out = c
try:
out = collection_i18nname_cache.cache[c][ln]
except KeyError:
pass # translation in LN does not exist
return out
class FieldI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N field names. This class is not to be used
directly; use function get_field_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
except Exception:
# database problems, return empty cache
return {}
for f, ln, i18nname in res:
if i18nname:
if f not in ret:
ret[f] = {}
ret[f][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('fieldname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not field_i18nname_cache.is_ok_p:
raise Exception
except Exception:
field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted field name (of type 'ln', 'long name') for
field F in language LN.
If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
and field I18N name cache timestamp and refresh cache from the DB
if needed. Otherwise don't bother checking DB timestamp and
return the cached value. (This is useful when get_field_i18nname
is called inside a loop.)
"""
if verify_cache_timestamp:
field_i18nname_cache.recreate_cache_if_needed()
out = f
try:
out = field_i18nname_cache.cache[f][ln]
except KeyError:
pass # translation in LN does not exist
return out
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
out = []
res = run_sql("SELECT name FROM collection ORDER BY name ASC")
for c_name in res:
c_name = c_name[0]
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c_name, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
out.append([c_name, c_printable])
return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
colls_nicely_ordered = []
res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score ASC""", (collid, ))
for c, cid in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
colls_nicely_ordered.append([c, c_printable])
colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
return colls_nicely_ordered
def get_index_id_from_field(field):
"""
Return index id with name corresponding to FIELD, or the first
index id where the logical field code named FIELD is indexed.
Return zero in case there is no index defined for this field.
Example: field='author', output=4.
"""
out = 0
if not field:
field = 'global' # empty string field means 'global' index (field 'anyfield')
# first look in the index table:
res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
if res:
out = res[0][0]
return out
# not found in the index table, now look in the field table:
res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
LIMIT 1""", (field,))
if res:
out = res[0][0]
return out
def get_words_from_pattern(pattern):
"""
Returns list of whitespace-separated words from pattern, removing any
trailing punctuation-like signs from words in pattern.
"""
words = {}
# clean trailing punctuation signs inside pattern
pattern = re_punctuation_followed_by_space.sub(' ', pattern)
for word in pattern.split():
if word not in words:
words[word] = 1
return words.keys()
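# Illustrative example (added; assumes the comma is listed in
# CFG_BIBINDEX_CHARS_PUNCTUATION, as in the default configuration): duplicates
# are collapsed and the order of the returned words is not guaranteed, since
# they are collected in a dictionary:
#   get_words_from_pattern("muon, kaon muon")  ->  ['muon', 'kaon']  (in some order)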
def create_basic_search_units(req, p, f, m=None, of='hb'):
"""Splits search pattern and search field into a list of independently searchable units.
- A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
'operator' is set union (|), set intersection (+) or set exclusion (-);
'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
'field' is either a code like 'title' or MARC tag like '100__a';
'type' is the search type ('w' for word file search, 'a' for access file search).
- Optionally, the function accepts the match type argument 'm'.
If it is set (e.g. from advanced search interface), then it
performs this kind of matching. If it is not set, then a guess is made.
'm' can have values: 'a'='all of the words', 'o'='any of the words',
'p'='phrase/substring', 'r'='regular expression',
'e'='exact value'.
- Warnings are printed on req (when not None) in case of HTML output formats."""
opfts = [] # will hold (o,p,f,t,h) units
# FIXME: quick hack for the journal index
if f == 'journal':
opfts.append(['+', p, f, 'w'])
return opfts
## check arguments: is desired matching type set?
if m:
## A - matching type is known; good!
if m == 'e':
# A1 - exact value:
opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
elif m == 'p':
# A2 - phrase/substring:
opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
elif m == 'r':
# A3 - regular expression:
opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
elif m == 'a' or m == 'w':
# A4 - all of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
opfts.append(['+', word, f, 'w']) # '+' in all units
elif m == 'o':
# A5 - any of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
if len(opfts)==0:
opfts.append(['+', word, f, 'w']) # '+' in the first unit
else:
opfts.append(['|', word, f, 'w']) # '|' in further units
else:
if of.startswith("h"):
write_warning("Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning", req=req)
opfts.append(['+', "%" + p + "%", f, 'w'])
else:
## B - matching type is not known: let us try to determine it by some heuristics
if f and p[0] == '"' and p[-1] == '"':
## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
opfts.append(['+', p[1:-1], f, 'a'])
elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor') and author_name_requires_phrase_search(p):
## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
## => doing washed ACC search
opfts.append(['+', p, f, 'a'])
elif f and p[0] == "'" and p[-1] == "'":
## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
elif f and p[0] == "/" and p[-1] == "/":
## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
opfts.append(['+', p[1:-1], f, 'r'])
elif f and p.find(',') >= 0:
## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
opfts.append(['+', p, f, 'a'])
elif f and str(f[0:2]).isdigit():
## B2 - does 'f' exist and starts by two digits? => doing ACC search
opfts.append(['+', p, f, 'a'])
else:
## B3 - doing WRD search, but maybe ACC too
# search units are separated by spaces unless the space is within single or double quotes
# so, let us replace temporarily any space within quotes by '__SPACE__'
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# and spaces after colon as well:
p = re_pattern_spaces_after_colon.sub(lambda x: x.group(1).replace(' ', '__SPACE__'), p)
# wash argument:
p = re_logical_and.sub(" ", p)
p = re_logical_or.sub(" |", p)
p = re_logical_not.sub(" -", p)
p = re_operators.sub(r' \1', p)
for pi in p.split(): # iterate through separated units (or items, as "pi" stands for "p item")
pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
# firstly, determine set operator
if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
oi = pi[0]
pi = pi[1:]
else:
# okay, there is no operator, so let us decide what to do by default
oi = '+' # by default we are doing set intersection...
# secondly, determine search pattern and field:
if pi.find(":") > 0:
fi, pi = pi.split(":", 1)
fi = wash_field(fi)
# test whether fi is a real index code or a MARC-tag defined code:
if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
pass
else:
# it is not, so join it back:
fi, pi = f, fi + ":" + pi
else:
fi, pi = f, pi
# wash 'fi' argument:
fi = wash_field(fi)
# wash 'pi' argument:
                pi = pi.strip() # strip any surrounding spaces
if re_quotes.match(pi):
# B3a - quotes are found => do ACC search (phrase search)
if pi[0] == '"' and pi[-1] == '"':
pi = pi.replace('"', '') # remove quote signs
opfts.append([oi, pi, fi, 'a'])
elif pi[0] == "'" and pi[-1] == "'":
pi = pi.replace("'", "") # remove quote signs
opfts.append([oi, "%" + pi + "%", fi, 'a'])
else: # unbalanced quotes, so fall back to WRD query:
opfts.append([oi, pi, fi, 'w'])
elif pi.startswith('/') and pi.endswith('/'):
# B3b - pi has slashes around => do regexp search
opfts.append([oi, pi[1:-1], fi, 'r'])
elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
# B3c - fi exists and starts by two digits => do ACC search
opfts.append([oi, pi, fi, 'a'])
elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
# B3d - logical field fi exists but there is no WRD index for fi => try ACC search
opfts.append([oi, pi, fi, 'a'])
else:
# B3e - general case => do WRD search
pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
for pii in get_words_from_pattern(pi):
opfts.append([oi, pii, fi, 'w'])
## sanity check:
for i in range(0, len(opfts)):
try:
pi = opfts[i][1]
if pi == '*':
if of.startswith("h"):
write_warning("Ignoring standalone wildcard word.", "Warning", req=req)
del opfts[i]
if pi == '' or pi == ' ':
fi = opfts[i][2]
if fi:
if of.startswith("h"):
write_warning("Ignoring empty <em>%s</em> search term." % fi, "Warning", req=req)
del opfts[i]
except:
pass
## replace old logical field names if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
opfts = [[o, p, wash_field(f), t] for o, p, f, t in opfts]
## return search units:
return opfts
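# Illustrative example (added; assumes a word index exists for the 'title'
# logical field and that no legacy field-name conversion applies, as in a
# default installation):
#   create_basic_search_units(None, "muon kaon", "title")
#   ->  [['+', 'muon', 'title', 'w'], ['+', 'kaon', 'title', 'w']]
# i.e. both words must be present ('+') and are looked up in the 'title'
# word index ('w').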
def page_start(req, of, cc, aas, ln, uid, title_message=None,
description='', keywords='', recID=-1, tab='', p='', em=''):
"""
Start page according to given output format.
@param title_message: title of the page, not escaped for HTML
@param description: description of the page, not escaped for HTML
@param keywords: keywords of the page, not escaped for HTML
"""
_ = gettext_set_language(ln)
if not req or isinstance(req, cStringIO.OutputType):
return # we were called from CLI
if not title_message:
title_message = _("Search Results")
content_type = get_output_format_content_type(of)
if of.startswith('x'):
if of == 'xr':
# we are doing RSS output
req.content_type = "application/rss+xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
else:
# we are doing XML output:
req.content_type = get_output_format_content_type(of, 'text/xml')
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
elif of.startswith('t') or str(of[0:3]).isdigit():
# we are doing plain text output:
req.content_type = "text/plain"
req.send_http_header()
elif of == "intbitset":
req.content_type = "application/octet-stream"
req.send_http_header()
elif of == "recjson":
req.content_type = "application/json"
req.send_http_header()
elif of == "id":
pass # nothing to do, we shall only return list of recIDs
elif content_type == 'text/html':
# we are doing HTML output:
req.content_type = "text/html"
req.send_http_header()
if not description:
description = "%s %s." % (cc, _("Search Results"))
if not keywords:
keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
## generate RSS URL:
argd = {}
if req.args:
argd = cgi.parse_qs(req.args)
rssurl = websearch_templates.build_rss_url(argd)
## add MathJax if displaying single records (FIXME: find
## eventual better place to this code)
if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
metaheaderadd = get_mathjax_header(req.is_https())
else:
metaheaderadd = ''
# Add metadata in meta tags for Google scholar-esque harvesting...
# only if we have a detailed meta format and we are looking at a
# single record
if recID != -1 and CFG_WEBSEARCH_DETAILED_META_FORMAT and \
record_exists(recID) == 1:
metaheaderadd += format_record(recID,
CFG_WEBSEARCH_DETAILED_META_FORMAT,
ln=ln)
## generate navtrail:
navtrail = create_navtrail_links(cc, aas, ln)
if navtrail != '':
navtrail += ' > '
if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
recID != -1:
# If we are not in information tab in HD format, customize
# the nav. trail to have a link back to main record. (Due
# to the way perform_request_search() works, hb
# (lowercase) is equal to hd)
navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
(CFG_BASE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
if (of != '' or of.lower() != 'hd') and of != 'hb':
# Export
format_name = of
query = "SELECT name FROM format WHERE code=%s"
res = run_sql(query, (of,))
if res:
format_name = res[0][0]
navtrail += ' > ' + format_name
else:
# Discussion, citations, etc. tabs
tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
navtrail += ' > ' + _(tab_label)
else:
navtrail += cgi.escape(title_message)
if p:
# we are serving search/browse results pages, so insert pattern:
navtrail += ": " + cgi.escape(p)
title_message = p + " - " + title_message
body_css_classes = []
if cc:
        # we know the collection, so let's allow page styles based on cc.
        # collection names may not satisfy the rules for CSS classes, which
        # are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
        # however, it isn't clear what we should do about cases with
        # numbers, so we leave them to fail. Everything else becomes "_".
css = nmtoken_from_string(cc).replace('.', '_').replace('-', '_').replace(':', '_')
body_css_classes.append(css)
## finally, print page header:
if em == '' or EM_REPOSITORY["header"] in em:
req.write(pageheaderonly(req=req, title=title_message,
navtrail=navtrail,
description=description,
keywords=keywords,
metaheaderadd=metaheaderadd,
uid=uid,
language=ln,
navmenuid='search',
navtrail_append_title_p=0,
rssurl=rssurl,
body_css_classes=body_css_classes))
req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
else:
req.content_type = content_type
req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG, em=""):
"End page according to given output format: e.g. close XML tags, add HTML footer, etc."
if of == "id":
return [] # empty recID list
if of == "intbitset":
return intbitset()
if not req:
return # we were called from CLI
if of.startswith('h'):
req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end
if em == "" or EM_REPOSITORY["footer"] in em:
req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
return
def create_add_to_search_pattern(p, p1, f1, m1, op1):
"""Create the search pattern """
if not p1:
return p
init_search_pattern = p
# operation: AND, OR, AND NOT
    if op1 == 'a' and p: # we don't want '+' at the beginning of the query
op = ' +'
elif op1 == 'o':
op = ' |'
elif op1 == 'n':
op = ' -'
else:
op = ''
# field
field = ''
if f1:
field = f1 + ':'
# type of search
pattern = p1
start = '('
end = ')'
if m1 == 'e':
start = end = '"'
elif m1 == 'p':
start = end = "'"
elif m1 == 'r':
start = end = '/'
else: # m1 == 'o' or m1 =='a'
words = p1.strip().split(' ')
if len(words) == 1:
start = end = ''
pattern = field + words[0]
elif m1 == 'o':
pattern = ' |'.join([field + word for word in words])
else:
pattern = ' '.join([field + word for word in words])
#avoid having field:(word1 word2) since this is not currently correctly working
return init_search_pattern + op + start + pattern + end
if not pattern:
return ''
#avoid having field:(word1 word2) since this is not currently correctly working
return init_search_pattern + op + field + start + pattern + end
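# Illustrative example (added): combining the current pattern "ellis" with an
# "any of the words" (m1='o') sub-query on the 'title' field, joined by AND
# (op1='a'):
#   create_add_to_search_pattern("ellis", "muon kaon", "title", "o", "a")
#   ->  'ellis +(title:muon |title:kaon)'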
def create_page_title_search_pattern_info(p, p1, p2, p3):
"""Create the search pattern bit for the page <title> web page
HTML header. Basically combine p and (p1,p2,p3) together so that
the page header may be filled whether we are in the Simple Search
or Advanced Search interface contexts."""
out = ""
if p:
out = p
else:
out = p1
if p2:
out += ' ' + p2
if p3:
out += ' ' + p3
return out
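# Illustrative example (added): when the simple-search pattern p is empty, the
# advanced-search patterns are concatenated instead:
#   create_page_title_search_pattern_info('', 'ellis', 'muon', '')  ->  'ellis muon'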
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
"Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
_ = gettext_set_language(ln)
box = ""
# day
box += """<select name="%sd">""" % name
box += """<option value="">%s""" % _("any day")
for day in range(1, 32):
box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
box += """</select>"""
# month
box += """<select name="%sm">""" % name
box += """<option value="">%s""" % _("any month")
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")),
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")),
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
box += """</select>"""
# year
box += """<select name="%sy">""" % name
box += """<option value="">%s""" % _("any year")
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year-20, this_year+1):
box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
box += """</select>"""
return box
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
action="", em=""):
"""Create search box for 'search again in the results page' functionality."""
if em != "" and EM_REPOSITORY["search_box"] not in em:
if EM_REPOSITORY["body"] in em and cc != CFG_SITE_NAME:
return '''
<h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc), }
else:
return ""
# load the right message language
_ = gettext_set_language(ln)
# some computations
cc_intl = get_coll_i18nname(cc, ln, False)
cc_colID = get_colID(cc)
colls_nicely_ordered = []
if cfg_nicely_ordered_collection_list:
colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
else:
colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
colls_nice = []
for (cx, cx_printable) in colls_nicely_ordered:
if not cx.startswith("Unnamed collection"):
colls_nice.append({'value': cx,
'text': cx_printable
})
coll_selects = []
if colls and colls[0] != CFG_SITE_NAME:
# some collections are defined, so print these first, and only then print 'add another collection' heading:
for c in colls:
if c:
temp = []
temp.append({'value': CFG_SITE_NAME,
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
})
# this field is used to remove the current collection from the ones to be searched.
temp.append({'value': '',
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("remove this publisher or journal") or _("remove this collection"))
})
for val in colls_nice:
# print collection:
if not cx.startswith("Unnamed collection"):
temp.append({'value': val['value'],
'text': val['text'],
'selected' : (c == re.sub(r"^[\s\-]*", "", val['value']))
})
coll_selects.append(temp)
coll_selects.append([{'value': '',
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("add another publisher or journal") or _("add another collection"))
}] + colls_nice)
else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
coll_selects.append([{'value': CFG_SITE_NAME,
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
}] + colls_nice)
## ranking methods
ranks = [{
'value' : '',
'text' : "- %s %s -" % (_("OR").lower(), _("rank by")),
}]
for (code, name) in get_bibrank_methods(cc_colID, ln):
# propose found rank methods:
ranks.append({
'value': code,
'text': name,
})
formats = get_available_output_formats(visible_only=True)
# show collections in the search box? (not if there is only one
# collection defined, and not if we are in light search)
show_colls = True
show_title = True
if len(collection_reclist_cache.cache.keys()) == 1 or \
aas == -1:
show_colls = False
show_title = False
if cc == CFG_SITE_NAME:
show_title = False
if CFG_INSPIRE_SITE:
show_title = False
return websearch_templates.tmpl_search_box(
ln = ln,
aas = aas,
cc_intl = cc_intl,
cc = cc,
ot = ot,
sp = sp,
action = action,
fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
p1 = p1,
p2 = p2,
p3 = p3,
op1 = op1,
op2 = op2,
rm = rm,
p = p,
f = f,
coll_selects = coll_selects,
d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
dt = dt,
sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
sf = sf,
so = so,
ranks = ranks,
sc = sc,
rg = rg,
formats = formats,
of = of,
pl = pl,
jrec = jrec,
ec = ec,
show_colls = show_colls,
show_title = show_title and (em=="" or EM_REPOSITORY["body"] in em)
)
def create_exact_author_browse_help_link(p=None, p1=None, p2=None, p3=None, f=None, f1=None, f2=None, f3=None,
rm=None, cc=None, ln=None, jrec=None, rg=None, aas=0, action=""):
"""Creates a link to help switch from author to exact author while browsing"""
if action == 'browse':
search_fields = (f, f1, f2, f3)
if 'author' in search_fields or 'firstauthor' in search_fields:
def add_exact(field):
if field == 'author' or field == 'firstauthor':
return 'exact' + field
return field
fe, f1e, f2e, f3e = [add_exact(field) for field in search_fields]
link_name = f or f1
link_name = (link_name == 'firstauthor' and 'exact first author') or 'exact author'
return websearch_templates.tmpl_exact_author_browse_help_link(p=p, p1=p1, p2=p2, p3=p3, f=fe, f1=f1e, f2=f2e, f3=f3e,
rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg, aas=aas, action=action,
link_name=link_name)
return ""
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
"""Creates navigation trail links, i.e. links to collection
ancestors (except Home collection). If aas==1, then links to
Advanced Search interfaces; otherwise Simple Search.
"""
dads = []
for dad in get_coll_ancestors(cc):
if dad != CFG_SITE_NAME: # exclude Home collection
dads.append((dad, get_coll_i18nname(dad, ln, False)))
if self_p and cc != CFG_SITE_NAME:
dads.append((cc, get_coll_i18nname(cc, ln, False)))
return websearch_templates.tmpl_navtrail_links(
aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'search within' selection box for the collection ID colID."""
res = None
if colID:
res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
fields = [{
'value' : '',
'text' : get_field_i18nname("any field", ln, False)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def get_sortby_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'sort by' selection box for the collection ID colID."""
_ = gettext_set_language(ln)
res = None
if colID:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
# no sort fields defined for this colID, try to take Home collection:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (1,))
if not res:
# no sort fields defined for the Home collection, take all sort fields defined wherever they are:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""",)
fields = [{
'value': '',
'text': _("latest first")
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def create_andornot_box(name='op', value='', ln='en'):
"Returns HTML code for the AND/OR/NOT selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="n"%s>%s
</select>
""" % (name,
is_selected('a', value), _("AND"),
is_selected('o', value), _("OR"),
is_selected('n', value), _("AND NOT"))
return out
def create_matchtype_box(name='m', value='', ln='en'):
"Returns HTML code for the 'match type' selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="e"%s>%s
<option value="p"%s>%s
<option value="r"%s>%s
</select>
""" % (name,
is_selected('a', value), _("All of the words:"),
is_selected('o', value), _("Any of the words:"),
is_selected('e', value), _("Exact phrase:"),
is_selected('p', value), _("Partial phrase:"),
is_selected('r', value), _("Regular expression:"))
return out
def is_selected(var, fld):
"Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes."
if type(var) is int and type(fld) is int:
if var == fld:
return " selected"
elif str(var) == str(fld):
return " selected"
elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]:
return " selected"
return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
Return list of cc, colls_to_display, colls_to_search since the list
of collections to display is different from that to search in.
This is because users might have chosen 'split by collection'
functionality.
The behaviour of "collections to display" depends solely whether
user has deselected a particular collection: e.g. if it started
from 'Articles and Preprints' page, and deselected 'Preprints',
then collection to display is 'Articles'. If he did not deselect
anything, then collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
     * if it is equal to 0, then we can wash the colls list down
       and search solely in the collection the user started from;
     * if it is non-zero, then we are splitting to the first level
       of collections, i.e. the collections as they appear on the page
       we started to search from;
The function raises exception
InvenioWebSearchUnknownCollectionError
if cc or one of c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if ci in collection_reclist_cache.cache:
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if cc not in collection_reclist_cache.cache:
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if coll in collection_reclist_cache.cache:
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
    # the following elif is a hack that preserves the above functionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
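# Illustrative example (added; the collection tree is hypothetical): starting
# from a compound collection 'Articles & Preprints' whose public sons are
# 'Articles' and 'Preprints', with nothing deselected and split_colls=1,
# wash_colls() would return 'Articles & Preprints' as the collection to
# display and its sons ['Articles', 'Preprints'] as the collections to search
# in, plus the (empty) list of hosted collections and the debug string.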
def get_synonym_terms(term, kbr_name, match_type, use_memoise=False):
"""
Return list of synonyms for TERM by looking in KBR_NAME in
MATCH_TYPE style.
@param term: search-time term or index-time term
@type term: str
@param kbr_name: knowledge base name
@type kbr_name: str
@param match_type: specifies how the term matches against the KBR
before doing the lookup. Could be `exact' (default),
'leading_to_comma', `leading_to_number'.
@type match_type: str
@param use_memoise: can we memoise while doing lookups?
@type use_memoise: bool
@return: list of term synonyms
@rtype: list of strings
"""
dterms = {}
## exact match is default:
term_for_lookup = term
term_remainder = ''
## but maybe match different term:
if match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma']:
mmm = re.match(r'^(.*?)(\s*,.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
elif match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_number']:
mmm = re.match(r'^(.*?)(\s*\d.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
    ## FIXME: workaround: escape SQL wild-card signs, since KBR's
    ## exact search uses a LIKE query and an unescaped '%' would match everything:
term_for_lookup = term_for_lookup.replace('%', '\\%')
## OK, now find synonyms:
for kbr_values in get_kbr_values(kbr_name,
searchkey=term_for_lookup,
searchtype='e',
use_memoise=use_memoise):
for kbr_value in kbr_values:
dterms[kbr_value + term_remainder] = 1
## return list of term synonyms:
return dterms.keys()
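# Illustrative example (added; the knowledge base name and its content are
# hypothetical): with a KB 'SYNONYM-TITLE' that maps 'muon' to 'mu meson', an
# exact-match lookup
#   get_synonym_terms('muon', 'SYNONYM-TITLE', 'exact')
# would return ['mu meson'].  With 'leading_to_comma' matching and the term
# 'Ellis, J', only 'Ellis' is looked up in the KB and the remainder ', J' is
# appended back to every synonym found.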
def wash_output_format(output_format, verbose=False, req=None):
    """Wash the output format OUTPUT_FORMAT. Currently only prevents input like
    'of=9' for the backwards-compatible format that prints certain fields
    only. (For this task, 'of=tm' is preferred.)"""
    if str(output_format[0:3]).isdigit() and len(output_format) != 6:
        # asked to print MARC tags, but not enough digits,
        # so let's switch back to the HTML brief default
        return 'hb'
    elif output_format in CFG_WEBSEARCH_BLACKLISTED_FORMATS:
        if verbose:
            write_warning("Selected format is not available through perform_request_search", req=req)
        # Returning an empty list seems dangerous because you wouldn't know
        # right away that the list is not supposed to be empty.
        return 'hb'
    else:
        return output_format
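# Illustrative example (added): a bare MARC-tag request such as
#   wash_output_format('245')
# falls back to 'hb' (HTML brief), because only full 6-character tag formats
# like '245__a' are accepted, while an ordinary format code such as 'hx' is
# returned unchanged provided it is not blacklisted.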
def wash_pattern(p):
"""Wash pattern passed by URL. Check for sanity of the wildcard by
removing wildcards if they are appended to extremely short words
    (1-3 letters). TODO: instead of this approximate treatment, it
    would be much better to introduce a time limit, e.g. to kill a
    query if it does not finish within 10 seconds."""
# strip accents:
# p = strip_accents(p) # FIXME: when available, strip accents all the time
# add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
p = " " + p + " "
# replace spaces within quotes by __SPACE__ temporarily:
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# get rid of unquoted wildcards after spaces:
p = re_pattern_wildcards_after_spaces.sub("\\1", p)
# get rid of extremely short words (1-3 letters with wildcards):
#p = re_pattern_short_words.sub("\\1", p)
# replace back __SPACE__ by spaces:
p = re_pattern_space.sub(" ", p)
# replace special terms:
p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
# remove unnecessary whitespace:
p = p.strip()
# remove potentially wrong UTF-8 characters:
p = wash_for_utf8(p)
return p
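# Illustrative examples (added): a wildcard standing alone after a space is
# dropped, and the special $TODAY$ term is replaced by the current date in
# YYYY-MM-DD form:
#   wash_pattern('ellis *')       ->  'ellis'
#   wash_pattern('year:$TODAY$')  ->  e.g. 'year:2014-01-31'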
def wash_field(f):
"""Wash field passed by URL."""
if f:
# get rid of unnecessary whitespace and make it lowercase
# (e.g. Author -> author) to better suit iPhone etc input
# mode:
f = f.strip().lower()
# wash legacy 'f' field names, e.g. replace 'wau' or `au' by
# 'author', if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
return f
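# Illustrative example (added; assumes no legacy field-name conversion is
# configured in CFG_WEBSEARCH_FIELDS_CONVERT):
#   wash_field('  Author ')  ->  'author'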
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
"""
Take user-submitted date arguments D1 (full datetime string) or
(D1Y, D1M, D1Y) year, month, day tuple and D2 or (D2Y, D2M, D2Y)
and return (YYY1-M1-D2 H1:M1:S2, YYY2-M2-D2 H2:M2:S2) datetime
strings in the YYYY-MM-DD HH:MM:SS format suitable for time
restricted searching.
Note that when both D1 and (D1Y, D1M, D1D) parameters are present,
the precedence goes to D1. Ditto for D2*.
Note that when (D1Y, D1M, D1D) are taken into account, some values
may be missing and are completed e.g. to 01 or 12 according to
whether it is the starting or the ending date.
"""
datetext1, datetext2 = "", ""
# sanity checking:
if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
return ("", "") # nothing selected, so return empty values
# wash first (starting) date:
if d1:
# full datetime string takes precedence:
datetext1 = d1
else:
# okay, first date passed as (year,month,day):
if d1y:
datetext1 += "%04d" % d1y
else:
datetext1 += "0000"
if d1m:
datetext1 += "-%02d" % d1m
else:
datetext1 += "-01"
if d1d:
datetext1 += "-%02d" % d1d
else:
datetext1 += "-01"
datetext1 += " 00:00:00"
# wash second (ending) date:
if d2:
# full datetime string takes precedence:
datetext2 = d2
else:
# okay, second date passed as (year,month,day):
if d2y:
datetext2 += "%04d" % d2y
else:
datetext2 += "9999"
if d2m:
datetext2 += "-%02d" % d2m
else:
datetext2 += "-12"
if d2d:
datetext2 += "-%02d" % d2d
else:
datetext2 += "-31" # NOTE: perhaps we should add max(datenumber) in
                               # given month, but for our querying it's not
# needed, 31 will always do
datetext2 += " 00:00:00"
# okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
return (datetext1, datetext2)
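# Illustrative example (added): supplying only a starting year completes the
# missing parts towards the widest possible interval:
#   wash_dates(d1y=2004)  ->  ('2004-01-01 00:00:00', '9999-12-31 00:00:00')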
def is_hosted_collection(coll):
"""Check if the given collection is a hosted one; i.e. its dbquery starts with hostedcollection:
Returns True if it is, False if it's not or if the result is empty or if the query failed"""
res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
if not res or not res[0][0]:
return False
try:
return res[0][0].startswith("hostedcollection:")
except IndexError:
return False
def get_colID(c):
"Return collection ID for collection name C. Return None if no match found."
colID = None
res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
if res:
colID = res[0][0]
return colID
def get_coll_normalised_name(c):
"""Returns normalised collection name (case sensitive) for collection name
C (case insensitive).
Returns None if no match found."""
res = run_sql("SELECT name FROM collection WHERE name=%s", (c,))
if res:
return res[0][0]
else:
return None
def get_coll_ancestors(coll):
"Returns a list of ancestors for collection 'coll'."
coll_ancestors = []
coll_ancestor = coll
while 1:
res = run_sql("""SELECT c.name FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
(coll_ancestor,))
if res:
coll_name = res[0][0]
coll_ancestors.append(coll_name)
coll_ancestor = coll_name
else:
break
# ancestors found, return reversed list:
coll_ancestors.reverse()
return coll_ancestors
def get_coll_sons(coll, coll_type='r', public_only=1):
"""Return a list of sons (first-level descendants) of type 'coll_type' for collection 'coll'.
If coll_type = '*', both regular and virtual collections will be returned.
If public_only, then return only non-restricted son collections.
"""
coll_sons = []
if coll_type == '*':
coll_type_query = " IN ('r', 'v')"
query_params = (coll, )
else:
coll_type_query = "=%s"
query_params = (coll_type, coll)
query = "SELECT c.name FROM collection AS c "\
"LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
"LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
"WHERE cc.type%s AND ccc.name=%%s" % coll_type_query
query += " ORDER BY cc.score ASC"
res = run_sql(query, query_params)
for name in res:
if not public_only or not collection_restricted_p(name[0]):
coll_sons.append(name[0])
return coll_sons
class CollectionAllChildrenDataCacher(DataCacher):
"""Cache for all children of a collection (regular & virtual, public & private)"""
def __init__(self):
def cache_filler():
def get_all_children(coll, coll_type='r', public_only=1, d_internal_coll_sons=None):
"""Return a list of all children of type 'coll_type' for collection 'coll'.
If public_only, then return only non-restricted child collections.
If coll_type='*', then return both regular and virtual collections.
d_internal_coll_sons is an internal dictionary used in recursion for
minimizing the number of database calls and should not be used outside
this scope.
"""
if not d_internal_coll_sons:
d_internal_coll_sons = {}
children = []
if coll not in d_internal_coll_sons:
d_internal_coll_sons[coll] = get_coll_sons(coll, coll_type, public_only)
for child in d_internal_coll_sons[coll]:
children.append(child)
children.extend(get_all_children(child, coll_type, public_only, d_internal_coll_sons)[0])
return children, d_internal_coll_sons
ret = {}
d_internal_coll_sons = None
collections = collection_reclist_cache.cache.keys()
for collection in collections:
ret[collection], d_internal_coll_sons = get_all_children(collection, '*', public_only=0, d_internal_coll_sons=d_internal_coll_sons)
return ret
def timestamp_verifier():
return max(get_table_update_time('collection'), get_table_update_time('collection_collection'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_allchildren_cache.is_ok_p:
raise Exception
except Exception:
collection_allchildren_cache = CollectionAllChildrenDataCacher()
def get_collection_allchildren(coll, recreate_cache_if_needed=True):
"""Returns the list of all children of a collection."""
if recreate_cache_if_needed:
collection_allchildren_cache.recreate_cache_if_needed()
if coll not in collection_allchildren_cache.cache:
return [] # collection does not exist; return empty list
return collection_allchildren_cache.cache[coll]
def get_coll_real_descendants(coll, coll_type='_', get_hosted_colls=True):
"""Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
that "A & B" has no associated database query defined.
"""
coll_sons = []
res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score ASC""",
(coll, coll_type,))
for name, dbquery in res:
if dbquery: # this is 'real' collection, so return it:
if get_hosted_colls:
coll_sons.append(name)
else:
if not dbquery.startswith("hostedcollection:"):
coll_sons.append(name)
else: # this is 'composed' collection, so recurse:
coll_sons.extend(get_coll_real_descendants(name))
return coll_sons
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Browse either biliographic phrases or words indexes, and display it."""
# load the right message language
_ = gettext_set_language(ln)
## is p enclosed in quotes? (coming from exact search)
if p.startswith('"') and p.endswith('"'):
p = p[1:-1]
## okay, "real browse" follows:
## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
if not f and p.find(":") > 0: # does 'p' contain ':'?
f, p = p.split(":", 1)
## do we search in words indexes?
if not f:
return browse_in_bibwords(req, p, f)
coll_hitset = intbitset()
for coll_name in colls:
coll_hitset |= get_collection_reclist(coll_name)
index_id = get_index_id_from_field(f)
if index_id != 0:
browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll_hitset)
else:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
while not browsed_phrases:
# try again and again with shorter and shorter pattern:
try:
p = p[:-1]
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
except:
register_exception(req=req, alert_admin=True)
# probably there are no hits at all:
req.write(_("No values found."))
return
## try to check hits in this particular collection selection:
browsed_phrases_in_colls = []
if 0:
for phrase in browsed_phrases:
phrase_hitset = intbitset()
phrase_hitsets = search_pattern("", phrase, f, 'e')
for coll in colls:
phrase_hitset.union_update(phrase_hitsets[coll])
if len(phrase_hitset) > 0:
# okay, this phrase has some hits in colls, so add it:
browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
## were there hits in collections?
if browsed_phrases_in_colls == []:
if browsed_phrases != []:
#write_warning(req, """<p>No match close to <em>%s</em> found in given collections.
#Please try different term.<p>Displaying matches in any collection...""" % p_orig)
## try to get nbhits for these phrases in any collection:
for phrase in browsed_phrases:
nbhits = get_nbhits_in_bibxxx(phrase, f, coll_hitset)
if nbhits > 0:
browsed_phrases_in_colls.append([phrase, nbhits])
## display results now:
out = websearch_templates.tmpl_browse_pattern(
f=f,
fn=get_field_i18nname(get_field_name(f) or f, ln, False),
ln=ln,
browsed_phrases_in_colls=browsed_phrases_in_colls,
colls=colls,
rg=rg,
)
req.write(out)
return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
"""Browse inside words indexes."""
if not p:
return
_ = gettext_set_language(ln)
urlargd = {}
urlargd.update(req.argd)
urlargd['action'] = 'search'
nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
req.write(websearch_templates.tmpl_search_in_bibwords(
p = p,
f = f,
ln = ln,
nearest_box = nearest_box
))
return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' within field 'f' according to
matching type 'm'. Return hitset of recIDs.
The function uses multi-stage searching algorithm in case of no
exact match found. See the Search Internals document for
detailed description.
The 'ap' argument governs whether alternative patterns are to
be used in case there is no direct hit for (p,f,m). For
example, whether to replace non-alphanumeric characters by
spaces if it would give some hits. See the Search Internals
document for detailed description. (ap=0 forbids the
alternative pattern usage, ap=1 permits it.)
'ap' is also internally used for allowing hidden tag search
(for requests coming from webcoll, for example). In this
case ap=-9.
The 'of' argument governs whether to print or not some
information to the user in case of no match found. (Usually it
prints the information in case of HTML formats, otherwise it's
silent).
The 'verbose' argument controls the level of debugging information
to be printed (0=least, 9=most).
All the parameters are assumed to have been previously washed.
This function is suitable as a mid-level API.
"""
_ = gettext_set_language(ln)
hitset_empty = intbitset()
# sanity check:
if not p:
hitset_full = intbitset(trailing_bits=1)
hitset_full.discard(0)
# no pattern, so return the whole universe
return hitset_full
# search stage 1: break up arguments into basic search units:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units = create_basic_search_units(req, p, f, m, of)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)), req=req)
write_warning("Search stage 1: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 2: do search for each search unit and verify hit presence:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units_hitsets = []
# prepare hidden-tag related settings:
myhiddens = CFG_BIBFORMAT_HIDDEN_TAGS
can_see_hidden = False
if req:
user_info = collect_user_info(req)
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if not req and ap == -9: # special request, coming from webcoll
can_see_hidden = True
if can_see_hidden:
myhiddens = []
if CFG_INSPIRE_SITE and of.startswith('h'):
# fulltext/caption search warnings for INSPIRE:
fields_to_be_searched = [f for dummy_o, p, f, m in basic_search_units]
if 'fulltext' in fields_to_be_searched:
write_warning(_("Full-text search is currently available for all arXiv papers, many theses, a few report series and some journal articles"), req=req)
elif 'caption' in fields_to_be_searched:
write_warning(_("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") %
{'x_range_from_year': '2008',
'x_range_to_year': '2012'}, req=req)
for idx_unit in xrange(len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_f and len(bsu_f) < 2:
if of.startswith("h"):
write_warning(_("There is no index %s. Searching for %s in all fields." % (bsu_f, bsu_p)), req=req)
bsu_f = ''
bsu_m = 'w'
if of.startswith("h") and verbose:
write_warning(_('Instead searching %s.' % str([bsu_o, bsu_p, bsu_f, bsu_m])), req=req)
try:
basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
except InvenioWebSearchWildcardLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term too generic, displaying only partial results..."), req=req)
except InvenioWebSearchReferstoLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term after reference operator too generic, displaying only partial results..."), req=req)
except InvenioWebSearchCitedbyLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term after citedby operator too generic, displaying only partial results..."), req=req)
# FIXME: print warning if we use native full-text indexing
if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
write_warning(_("No phrase index available for fulltext yet, looking for word combination..."), req=req)
#check that the user is allowed to search with this tag
#if he/she tries it
if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
for htag in myhiddens:
ltag = len(htag)
samelenfield = bsu_f[0:ltag]
if samelenfield == htag: #user searches by a hidden tag
#we won't show you anything..
basic_search_unit_hitset = intbitset()
if verbose >= 9 and of.startswith("h"):
write_warning("Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
(cgi.escape(repr(bsu_p)), repr(myhiddens)), req=req)
display_nearest_terms_box = False #..and stop spying, too.
if verbose >= 9 and of.startswith("h"):
write_warning("Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset), req=req)
if len(basic_search_unit_hitset) > 0 or \
ap<1 or \
bsu_o in ("|", "-") or \
((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
# stage 2-1: this basic search unit is retained, since
# either the hitset is non-empty, or the approximate
# pattern treatment is switched off, or the search unit
# was joined by an OR operator to preceding/following
# units so we do not require that it exists
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
else: # it is WRD query
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
if verbose and of.startswith('h') and req:
write_warning("Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)), req=req)
basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
if len(basic_search_unit_hitset) > 0:
# we retain the new unit instead
if of.startswith('h'):
write_warning(_("No exact match found for %(x_query1)s, using %(x_query2)s instead...") %
{'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"}, req=req)
basic_search_units[idx_unit][1] = bsu_pn
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
if verbose and of.startswith("h"):
t2 = os.times()[4]
for idx_unit in range(0, len(basic_search_units)):
write_warning("Search stage 2: basic search unit %s gave %d hits." %
(basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])), req=req)
write_warning("Search stage 2: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 3: apply boolean query for each search unit:
if verbose and of.startswith("h"):
t1 = os.times()[4]
# let the initial set be the complete universe:
hitset_in_any_collection = intbitset(trailing_bits=1)
hitset_in_any_collection.discard(0)
for idx_unit in xrange(len(basic_search_units)):
this_unit_operation = basic_search_units[idx_unit][0]
this_unit_hitset = basic_search_units_hitsets[idx_unit]
if this_unit_operation == '+':
hitset_in_any_collection.intersection_update(this_unit_hitset)
elif this_unit_operation == '-':
hitset_in_any_collection.difference_update(this_unit_hitset)
elif this_unit_operation == '|':
hitset_in_any_collection.union_update(this_unit_hitset)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(this_unit_operation), "Error", req=req)
if len(hitset_in_any_collection) == 0:
# no hits found, propose alternative boolean query:
if of.startswith('h') and display_nearest_terms_box:
nearestterms = []
for idx_unit in range(0, len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_p.startswith("%") and bsu_p.endswith("%"):
bsu_p = "'" + bsu_p[1:-1] + "'"
bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
# create a similar query, but with the basic search unit only
argd = {}
argd.update(req.argd)
argd['p'] = bsu_p
argd['f'] = bsu_f
nearestterms.append((bsu_p, bsu_nbhits, argd))
text = websearch_templates.tmpl_search_no_boolean_hits(
ln=ln, nearestterms=nearestterms)
write_warning(text, req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection), req=req)
write_warning("Search stage 3: execution took %.2f seconds." % (t2 - t1), req=req)
return hitset_in_any_collection
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' containing parenthesis within field 'f' according to
matching type 'm'. Return hitset of recIDs.
For more details on the parameters see 'search_pattern'
"""
_ = gettext_set_language(ln)
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
spires_syntax_query = False
# if the pattern uses SPIRES search syntax, convert it to Invenio syntax
if spires_syntax_converter.is_applicable(p):
spires_syntax_query = True
p = spires_syntax_converter.convert_query(p)
# sanity check: do not call parenthesised parser for search terms
# like U(1) but still call it for searches like ('U(1)' | 'U(2)'):
if not re_pattern_parens.search(re_pattern_parens_quotes.sub('_', p)):
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# Try searching with parentheses
try:
parser = SearchQueryParenthesisedParser()
# get a hitset with all recids
result_hitset = intbitset(trailing_bits=1)
# parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
parsing_result = parser.parse_query(p)
if verbose and of.startswith("h"):
write_warning("Search stage 1: search_pattern_parenthesised() searched %s." % repr(p), req=req)
write_warning("Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result), req=req)
# go through every pattern
# calculate hitset for it
# combine pattern's hitset with the result using the corresponding operator
for index in xrange(0, len(parsing_result)-1, 2):
current_operator = parsing_result[index]
current_pattern = parsing_result[index+1]
if CFG_INSPIRE_SITE and spires_syntax_query:
# setting ap=0 to turn off approximate matching when a pattern gives
# 0 results, since it does not work well in combinations.
# FIXME: The right fix involves collecting statuses for each
# hitset, then showing a nearest terms box exactly once,
# outside this loop.
ap = 0
display_nearest_terms_box = False
# obtain a hitset for the current pattern
current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# combine the current hitset with resulting hitset using the current operator
if current_operator == '+':
result_hitset = result_hitset & current_hitset
elif current_operator == '-':
result_hitset = result_hitset - current_hitset
elif current_operator == '|':
result_hitset = result_hitset | current_hitset
else:
assert False, "Unknown operator in search_pattern_parenthesised()"
return result_hitset
# If searching with parentheses fails, perform the search ignoring parentheses
except SyntaxError:
write_warning(_("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."), req=req)
# remove the parentheses in the query. The current implementation removes all the parentheses,
# but it could be improved to remove only those that are not inside quotes
p = p.replace('(', ' ')
p = p.replace(')', ' ')
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
def search_unit(p, f=None, m=None, wl=0, ignore_synonyms=None):
"""Search for basic search unit defined by pattern 'p' and field
'f' and matching type 'm'. Return hitset of recIDs.
All the parameters are assumed to have been previously washed.
'p' is assumed to be already a ``basic search unit'' so that it
is searched as such and is not broken up in any way. Only
wildcard and span queries are being detected inside 'p'.
If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
one of the indexes that has defined runtime synonym knowledge
base, then look up there and automatically enrich search
results with results for synonyms.
In case the wildcard limit (wl) is greater than 0 and this limit
is reached an InvenioWebSearchWildcardLimitError will be raised.
In case you want to call this function with no limit for the
wildcard queries, wl should be 0.
Parameter 'ignore_synonyms' is a list of terms for which we
should not try to further find a synonym.
This function is suitable as a low-level API.
"""
## create empty output results set:
hitset = intbitset()
if not p: # sanity checking
return hitset
tokenizer = get_field_tokenizer_type(f)
hitset_cjk = intbitset()
if tokenizer == "BibIndexCJKTokenizer":
if is_there_any_CJK_character_in_text(p):
cjk_tok = BibIndexCJKTokenizer()
chars = cjk_tok.tokenize_for_words(p)
for char in chars:
hitset_cjk |= search_unit_in_bibwords(char, f, wl)
## eventually look up runtime synonyms:
hitset_synonyms = intbitset()
if CFG_WEBSEARCH_SYNONYM_KBRS.has_key(f or 'anyfield'):
if ignore_synonyms is None:
ignore_synonyms = []
ignore_synonyms.append(p)
for p_synonym in get_synonym_terms(p,
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][0],
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][1]):
if p_synonym != p and \
not p_synonym in ignore_synonyms:
hitset_synonyms |= search_unit(p_synonym, f, m, wl,
ignore_synonyms)
## look up hits:
if f == 'fulltext' and get_idx_indexer('fulltext') == 'SOLR' and CFG_SOLR_URL:
# redirect to Solr
try:
return search_unit_in_solr(p, f, m)
except:
# There were troubles with getting full-text search
# results from Solr. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
elif f == 'fulltext' and get_idx_indexer('fulltext') == 'XAPIAN' and CFG_XAPIAN_ENABLED:
# redirect to Xapian
try:
return search_unit_in_xapian(p, f, m)
except:
# There were troubles with getting full-text search
# results from Xapian. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
if f == 'datecreated':
hitset = search_unit_in_bibrec(p, p, 'c')
elif f == 'datemodified':
hitset = search_unit_in_bibrec(p, p, 'm')
elif f == 'refersto':
# we are doing search by the citation count
hitset = search_unit_refersto(p)
elif f == 'referstoexcludingselfcites':
# we are doing search by the citation count
hitset = search_unit_refersto_excluding_selfcites(p)
elif f == 'cataloguer':
# we are doing search by the cataloguer nickname
hitset = search_unit_in_record_history(p)
elif f == 'rawref':
from invenio.refextract_api import search_from_reference
field, pattern = search_from_reference(p)
return search_unit(pattern, field)
elif f == 'citedby':
# we are doing search by the citation count
hitset = search_unit_citedby(p)
elif f == 'citedbyexcludingselfcites':
# we are doing search by the citation count
hitset = search_unit_citedby_excluding_selfcites(p)
elif m == 'a' or m == 'r':
# we are doing either phrase search or regexp search
if f == 'fulltext':
# FIXME: workaround for not having phrase index yet
return search_pattern(None, p, f, 'w')
index_id = get_index_id_from_field(f)
if index_id != 0:
if m == 'a' and index_id in get_idxpair_field_ids():
#for exact match on the admin configured fields we are searching in the pair tables
hitset = search_unit_in_idxpairs(p, f, m, wl)
else:
hitset = search_unit_in_idxphrases(p, f, m, wl)
else:
hitset = search_unit_in_bibxxx(p, f, m, wl)
# if not hitset and m == 'a' and (p[0] != '%' and p[-1] != '%'):
# #if we have no results by doing exact matching, do partial matching
# #for removing the distinction between simple and double quotes
# hitset = search_unit_in_bibxxx('%' + p + '%', f, m, wl)
elif p.startswith("cited:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:])
elif p.startswith("citedexcludingselfcites:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[len("citedexcludingselfcites:"):], exclude_selfcites=True)
else:
# we are doing bibwords search by default
hitset = search_unit_in_bibwords(p, f, wl=wl)
## merge synonym results and return total:
hitset |= hitset_synonyms
hitset |= hitset_cjk
return hitset
def get_idxpair_field_ids():
"""Returns the list of ids for the fields that idxPAIRS should be used on"""
index_dict = dict(run_sql("SELECT name, id FROM idxINDEX"))
return [index_dict[field] for field in index_dict if field in CFG_WEBSEARCH_IDXPAIRS_FIELDS]
def search_unit_in_bibwords(word, f, decompress=zlib.decompress, wl=0):
"""Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
# if no field is specified, search in the global index.
f = f or 'anyfield'
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
stemming_language = get_index_stemming_language(index_id)
else:
return intbitset() # word index f does not exist
# wash 'word' argument and run query:
if f.endswith('count') and word.endswith('+'):
# field count query of the form N+ so transform N+ to N->99999:
word = word[:-1] + '->99999'
word = word.replace('*', '%') # we now use '*' as the truncation character
words = word.split("->", 1) # check for span query
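# Illustrative sketch (assumed term values): a span pattern such as
# 'muon->neutrino' is split into words = ['muon', 'neutrino'] and resolved
# with a single "term BETWEEN %s AND %s" query against the idxWORDxxF table;
# a plain pattern falls through to the LIKE / '=' branches below.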
if len(words) == 2:
word0 = re_word.sub('', words[0])
word1 = re_word.sub('', words[1])
if stemming_language:
word0 = lower_index_term(word0)
word1 = lower_index_term(word1)
# We remove trailing truncation character before stemming
if word0.endswith('%'):
word0 = stem(word0[:-1], stemming_language) + '%'
else:
word0 = stem(word0, stemming_language)
if word1.endswith('%'):
word1 = stem(word1[:-1], stemming_language) + '%'
else:
word1 = stem(word1, stemming_language)
word0_washed = wash_index_term(word0)
word1_washed = wash_index_term(word1)
if f.endswith('count'):
# field count query; convert to integers in order
# to have numerical behaviour for 'BETWEEN n1 AND n2' query
try:
word0_washed = int(word0_washed)
word1_washed = int(word1_washed)
except ValueError:
pass
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
(word0_washed, word1_washed), wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
if f == 'journal':
pass # FIXME: quick hack for the journal index
else:
word = re_word.sub('', word)
if stemming_language:
word = lower_index_term(word)
# We remove trailing truncation character before stemming
if word.endswith('%'):
word = stem(word[:-1], stemming_language) + '%'
else:
word = stem(word, stemming_language)
if word.find('%') >= 0: # do we have wildcard in the word?
if f == 'journal':
# FIXME: quick hack for the journal index
# FIXME: we can run a sanity check here for all indexes
res = ()
else:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
(wash_index_term(word),), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
(wash_index_term(word),))
# fill the result set:
for word, hitlist in res:
hitset_bibwrd = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibwrd)
else:
hitset = hitset_bibwrd
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
def search_unit_in_idxpairs(p, f, search_type, wl=0):
"""Searches for pair 'p' inside idxPAIR table for field 'f' and
returns hitset of recIDs found."""
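# Illustrative sketch (the tokenization shown is an assumption, for
# orientation only): the pairs tokenizer is expected to split a phrase into
# consecutive word pairs, e.g. 'ellis muon decay' -> ['ellis muon', 'muon decay'],
# and the idxPAIRxxF table is then queried once per pair, intersecting the
# per-pair hitsets below.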
limit_reached = 0 # flag for knowing if the query limit has been reached
do_exact_search = True # flag to know when it makes sense to try to do exact matching
result_set = intbitset()
#determine the idxPAIR table to read from
index_id = get_index_id_from_field(f)
if not index_id:
return intbitset()
stemming_language = get_index_stemming_language(index_id)
pairs_tokenizer = BibIndexDefaultTokenizer(stemming_language)
idxpair_table_washed = wash_table_column_name("idxPAIR%02dF" % index_id)
if p.startswith("%") and p.endswith("%"):
p = p[1:-1]
original_pattern = p
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
queries_releated_vars = [] # contains tuples of (query_addons, query_params, use_query_limit)
#is it a span query?
ps = p.split("->", 1)
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
#so we are dealing with a span query
pairs_left = pairs_tokenizer.tokenize_for_pairs(ps[0])
pairs_right = pairs_tokenizer.tokenize_for_pairs(ps[1])
if not pairs_left or not pairs_right:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
elif len(pairs_left) != len(pairs_right):
# it is kind of hard to know what the user actually wanted
# we have to do: foo bar baz -> qux xyz, so let's switch to phrase
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
elif len(pairs_left) > 1 and \
len(pairs_right) > 1 and \
pairs_left[:-1] != pairs_right[:-1]:
# again we have something like: foo bar baz -> abc xyz qux
# so we'd better switch to phrase
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
else:
# finally, we can treat the search using idxPairs
# at this step we have either: foo bar -> abc xyz
# or foo bar abc -> foo bar xyz
queries_releated_vars = [("BETWEEN %s AND %s", (pairs_left[-1], pairs_right[-1]), True)]
for pair in pairs_left[:-1]: # which should be equal to pairs_right[:-1]
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False # no exact search for span queries
elif p.find('%') > -1:
#tokenizing p will remove the '%', so we have to make sure it stays
replacement = 'xxxxxxxxxx' # hopefully this will not clash with anything in the future
p = string.replace(p, '%', replacement)
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
if string.find(pair, replacement) > -1:
pair = string.replace(pair, replacement, '%') #we replace back the % sign
queries_releated_vars.append(("LIKE %s", (pair, ), True))
else:
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False
else:
#normal query
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
queries_releated_vars.append(("= %s", (pair, ), False))
first_results = 1 # flag to know if it's the first set of results or not
for query_var in queries_releated_vars:
query_addons = query_var[0]
query_params = query_var[1]
use_query_limit = query_var[2]
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params, wildcard_limit=wl) #kwalitee:disable=sql
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params) #kwalitee:disable=sql
if not res:
return intbitset()
for pair, hitlist in res:
hitset_idxpairs = intbitset(hitlist)
if first_results:
result_set = hitset_idxpairs
first_results = 0
else:
result_set.intersection_update(hitset_idxpairs)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(result_set)
# check if we need to eliminate the false positives
if CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH and do_exact_search:
# we need to eliminate the false positives
idxphrase_table_washed = wash_table_column_name("idxPHRASE%02dR" % index_id)
not_exact_search = intbitset()
for recid in result_set:
res = run_sql("SELECT termlist FROM %s WHERE id_bibrec %s" %(idxphrase_table_washed, '=%s'), (recid, )) #kwalitee:disable=sql
if res:
termlist = deserialize_via_marshal(res[0][0])
if not [term for term in termlist if term.lower().find(p.lower()) > -1]:
not_exact_search.add(recid)
else:
not_exact_search.add(recid)
# remove the recs that are false positives from the final result
result_set.difference_update(not_exact_search)
return result_set
def search_unit_in_idxphrases(p, f, search_type, wl=0):
"""Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
# deduce in which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return intbitset() # phrase index f does not exist
# detect query type (exact phrase, partial phrase, regexp):
if search_type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = p.replace('*', '%') # we now use '*' as the truncation character
ps = p.split("->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if p.find('%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# special washing for fuzzy author index:
if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor'):
query_params_washed = ()
for query_param in query_params:
query_params_washed += (wash_author_name(query_param),)
query_params = query_params_washed
# perform search:
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
# fill the result set:
for dummy_word, hitlist in res:
hitset_bibphrase = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibphrase)
else:
hitset = hitset_bibphrase
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
def search_unit_in_bibxxx(p, f, type, wl=0):
"""Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'journal' or f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
query_addons = "" # will hold additional SQL code for the query
query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
# wash arguments:
f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
if not tl:
# f index does not exist, nevermind
pass
# okay, start search:
l = [] # will hold list of recID that matched
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# construct and run query:
if t == "001":
if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
# verify that the params are integers (to avoid returning record 123 when searching for 123foo)
try:
query_params = tuple(int(param) for param in query_params)
except ValueError:
return intbitset()
if use_query_limit:
try:
res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params)
else:
query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
(bx, bibx, query_addons)
if len(t) != 6 or t[-1:]=='%':
# wildcard query, or only the beginning of field 't'
# is defined, so add wildcard character:
query += " AND bx.tag LIKE %s"
query_params_and_tag = query_params + (t + '%',)
else:
# exact query for 't':
query += " AND bx.tag=%s"
query_params_and_tag = query_params + (t,)
if use_query_limit:
try:
res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql(query, query_params_and_tag)
# fill the result set:
for id_bibrec in res:
if id_bibrec[0]:
l.append(id_bibrec[0])
# check no of hits found:
nb_hits = len(l)
# okay, return result set:
hitset = intbitset(l)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
return hitset
def search_unit_in_solr(p, f=None, m=None):
"""
Query a Solr index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return solr_get_bitset(f, p)
def search_unit_in_xapian(p, f=None, m=None):
"""
Query a Xapian index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return xapian_get_bitset(f, p)
def search_unit_in_bibrec(datetext1, datetext2, search_type='c'):
"""
Return hitset of recIDs found that were either created or modified
(according to the 'search_type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive.
Does not pay attention to pattern, collection, anything. Useful
to intersect later on with the 'real' query.
"""
hitset = intbitset()
if search_type and search_type.startswith("m"):
search_type = "modification_date"
else:
search_type = "creation_date" # by default we are searching for creation dates
parts = datetext1.split('->')
if len(parts) > 1 and datetext1 == datetext2:
datetext1 = parts[0]
datetext2 = parts[1]
if datetext1 == datetext2:
res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (search_type,),
(datetext1 + '%',))
else:
res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (search_type, search_type),
(datetext1, datetext2))
for row in res:
hitset += row[0]
return hitset
def search_unit_by_times_cited(p, exclude_selfcites=False):
"""
Return hitset of recIDs found that are cited P times.
Usually P looks like '10->23'.
"""
numstr = '"'+p+'"'
# this is sort of stupid, but since we may need to
# get the records that do _not_ have cites, we have to
# know the ids of all records, too;
# this is needed only if p is 0, "0", or a span starting or ending with 0 (e.g. "0->5")
allrecs = []
if p == 0 or p == "0" or \
p.startswith("0->") or p.endswith("->0"):
allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
return get_records_with_num_cites(numstr, allrecs,
exclude_selfcites=exclude_selfcites)
def search_unit_refersto(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return the set of records that refer to (cite) these records.
"""
if query:
ahitset = search_pattern(p=query)
res = get_refersto_hitset(ahitset, record_limit=CFG_WEBSEARCH_MAX_RECORDS_REFERSTO)
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_REFERSTO:
raise InvenioWebSearchReferstoLimitError(res)
return res
else:
return intbitset([])
def search_unit_refersto_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return the set of records that refer to (cite) these records,
excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citers = intbitset()
citations = get_cited_by_list(ahitset, record_limit=CFG_WEBSEARCH_MAX_RECORDS_REFERSTO)
selfcitations = get_self_cited_by_list(ahitset, record_limit=CFG_WEBSEARCH_MAX_RECORDS_REFERSTO)
for cites, selfcites in zip(citations, selfcitations):
# cites is in the form [(citee, citers), ...]
citers += cites[1] - selfcites[1]
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_REFERSTO:
raise InvenioWebSearchReferstoLimitError(citers)
return citers
else:
return intbitset([])
def search_unit_in_record_history(query):
"""
Return hitset of recIDs that were modified by the given cataloguer
"""
if query:
try:
cataloguer_name, modification_date = query.split(":")
except ValueError:
cataloguer_name = query
modification_date = ""
if modification_date:
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
modification_date = spires_syntax_converter.convert_date(modification_date)
parts = modification_date.split('->', 1)
if len(parts) > 1:
start_date, end_date = parts
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date>=%s AND job_date<=%s",
(cataloguer_name, start_date, end_date))
else:
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date LIKE %s",
(cataloguer_name, modification_date + '%',))
return intbitset(res)
else:
sql = "SELECT id_bibrec FROM hstRECORD WHERE job_person=%s"
res = intbitset(run_sql(sql, (cataloguer_name,)))
return res
else:
return intbitset([])
def search_unit_citedby(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
res = get_citedby_hitset(ahitset, record_limit=CFG_WEBSEARCH_MAX_RECORDS_CITEDBY)
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_CITEDBY:
raise InvenioWebSearchCitedbyLimitError(res)
return res
else:
return intbitset([])
else:
return intbitset([])
def search_unit_citedby_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return the set of records cited by these records, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citees = intbitset()
references = get_refers_to_list(ahitset, record_limit=CFG_WEBSEARCH_MAX_RECORDS_CITEDBY)
selfreferences = get_self_refers_to_list(ahitset, record_limit=CFG_WEBSEARCH_MAX_RECORDS_CITEDBY)
for refs, selfrefs in zip(references, selfreferences):
# refs is in the form [(citer, citees), ...]
citees += refs[1] - selfrefs[1]
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_CITEDBY:
raise InvenioWebSearchCitedbyLimitError(citees)
return citees
else:
return intbitset([])
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
"""Return dict of hitsets given by intersection of hitset with the collection universes."""
_ = gettext_set_language(ln)
# search stage 4: intersect with the collection universe
if verbose and of.startswith("h"):
t1 = os.times()[4]
results = {} # all final results
results_nbhits = 0
# calculate the list of recids (restricted or not) that the user has rights to access and we should display (only those)
records_that_can_be_displayed = intbitset()
if not req or isinstance(req, cStringIO.OutputType): # called from CLI
user_info = {}
for coll in colls:
results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
results_nbhits += len(results[coll])
records_that_can_be_displayed = hitset_in_any_collection
permitted_restricted_collections = []
else:
user_info = collect_user_info(req)
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
# let's get the restricted collections the user has rights to view
if user_info['guest'] == '1':
permitted_restricted_collections = []
## For guest users that are actually authorized to some restricted
## collection (by virtue of the IP address in a FireRole rule)
## we explicitly build the list of permitted_restricted_collections
for coll in colls:
if collection_restricted_p(coll) and (acc_authorize_action(user_info, 'viewrestrcoll', collection=coll)[0] == 0):
permitted_restricted_collections.append(coll)
else:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
# let's build the list of both public and restricted
# child collections of the collection from which the user
# started his/her search. This list of children colls will be
# used in the warning proposing a search in those collections
try:
current_coll = req.argd['cc'] # current_coll: coll from which user started his/her search
except (AttributeError, KeyError):
current_coll = CFG_SITE_NAME
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
if policy == 'ANY':# the user needs to have access to at least one collection that restricts the records
#we need this to be able to remove records that are both in a public and restricted collection
permitted_recids = intbitset()
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection in permitted_restricted_collections:
permitted_recids |= get_collection_reclist(collection)
else:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - (notpermitted_recids - permitted_recids)
else: # the user needs to have access to all collections that restrict the records
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection not in permitted_restricted_collections:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - notpermitted_recids
for coll in colls_to_be_displayed:
results[coll] = results.get(coll, intbitset()) | (records_that_can_be_displayed & get_collection_reclist(coll))
results_nbhits += len(results[coll])
if results_nbhits == 0:
# no hits found, try to search in Home and restricted and/or hidden collections:
results = {}
results_in_Home = records_that_can_be_displayed & get_collection_reclist(CFG_SITE_NAME)
results_in_restricted_collections = intbitset()
results_in_hidden_collections = intbitset()
for coll in permitted_restricted_collections:
if not get_coll_ancestors(coll): # hidden collection
results_in_hidden_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
else:
results_in_restricted_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
# in this way, we do not count twice, records that are both in Home collection and in a restricted collection
total_results = len(results_in_Home.union(results_in_restricted_collections))
if total_results > 0:
# some hits found in Home and/or restricted collections, so propose this search:
if of.startswith("h") and display_nearest_terms_box:
url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
len_colls_to_display = len(colls_to_be_displayed)
# trim the list of collections to the first two, since it might get very large
write_warning(_("No match found in collection %(x_collection)s. Other collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %
{'x_collection': '<em>' +
string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed[:2]], ', ') +
(len_colls_to_display > 2 and ' et al' or '') + '</em>',
'x_url_open': '<a class="nearestterms" href="%s">' % (url),
'x_nb_hits': total_results,
'x_url_close': '</a>'}, req=req)
# display the whole list of collections in a comment
if len_colls_to_display > 2:
write_warning("<!--No match found in collection <em>%(x_collection)s</em>.-->" %
{'x_collection': string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed], ', ')},
req=req)
else:
# no hits found: either the user is looking for a document and has no rights to it,
# or the user is looking for a hidden document:
if of.startswith("h") and display_nearest_terms_box:
if len(results_in_hidden_collections) > 0:
write_warning(_("No public collection matched your query. "
"If you were looking for a hidden document, please type "
"the correct URL for this record."), req=req)
else:
write_warning(_("No public collection matched your query. "
"If you were looking for a non-public document, please choose "
"the desired restricted collection first."), req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits, req=req)
write_warning("Search stage 4: execution took %.2f seconds." % (t2 - t1), req=req)
return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
"""Return intersection of search 'results' (a dict of hitsets
with collection as key) with the 'hitset', i.e. apply
'hitset' intersection to each collection within search
'results'.
If the final set would be empty and 'ap'
(approximate pattern) is true, then print the `aptext' warning
and return the original 'results' set unchanged. If 'ap' is
false, then return the empty results set.
"""
if ap:
results_ap = copy.deepcopy(results)
else:
results_ap = {} # will return empty dict in case of no hits found
nb_total = 0
final_results = {}
for coll in results.keys():
final_results[coll] = results[coll].intersection(hitset)
nb_total += len(final_results[coll])
if nb_total == 0:
if of.startswith("h"):
write_warning(aptext, req=req)
final_results = results_ap
return final_results
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
"""Return a box similar to ``Not satisfied...'' one by proposing
author searches for similar names. Namely, take AUTHOR_NAME
and the first initial of the firstame (after comma) and look
into author index whether authors with e.g. middle names exist.
Useful mainly for CERN Library that sometimes contains name
forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
same person. The box isn't proposed if no similarly named
authors are found to exist.
"""
# return nothing if not configured:
if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
return ""
# return empty box if there is no initial:
if re.match(r'[^ ,]+, [^ ]', author_name) is None:
return ""
# firstly find name comma initial:
author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
# secondly search for similar name forms:
similar_author_names = {}
for name in author_name_to_search, strip_accents(author_name_to_search):
for tag in get_field_tags("author"):
# deduce into which bibxxx table we will search:
digit1, digit2 = int(tag[0]), int(tag[1])
bx = "bib%d%dx" % (digit1, digit2)
if len(tag) != 6 or tag[-1:] == '%':
# only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
(name + "%", tag + "%"))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
(name + "%", tag))
for row in res:
similar_author_names[row[0]] = 1
# remove the original name and sort the list:
try:
del similar_author_names[author_name]
except KeyError:
pass
# thirdly print the box:
out = ""
if similar_author_names:
out_authors = similar_author_names.keys()
out_authors.sort()
tmp_authors = []
for out_author in out_authors:
nbhits = get_nbhits_in_bibxxx(out_author, "author")
if nbhits:
tmp_authors.append((out_author, nbhits))
out += websearch_templates.tmpl_similar_author_names(
authors=tmp_authors, ln=ln)
return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
"""Return text box containing list of 'n' nearest terms above/below 'p'
for the field 'f' for matching type 't' (words/phrases) in
language 'ln'.
Propose new searches according to `urlargd' with the new words.
If `intro_text_p' is true, then display the introductory message,
otherwise print only the nearest terms in the box content.
"""
# load the right message language
_ = gettext_set_language(ln)
if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
return _("Your search did not match any records. Please try again.")
nearest_terms = []
if not p: # sanity check
p = "."
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
index_id = get_index_id_from_field(f)
if f == 'fulltext':
if CFG_SOLR_URL:
return _("No match found, please enter different search terms.")
else:
# FIXME: workaround for not having native phrase index yet
t = 'w'
# special indexes:
if f == 'refersto' or f == 'referstoexcludingselfcites':
return _("There are no records referring to %s.") % cgi.escape(p)
if f == 'cataloguer':
return _("There are no records modified by %s.") % cgi.escape(p)
if f == 'citedby' or f == 'citedbyexcludingselfcites':
return _("There are no records cited by %s.") % cgi.escape(p)
# look for nearest terms:
if t == 'w':
nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
if not nearest_terms:
return _("No word index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
else:
nearest_terms = []
if index_id:
nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
if f == 'datecreated' or f == 'datemodified':
nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
if not nearest_terms:
nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
if not nearest_terms:
return _("No phrase index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
terminfo = []
for term in nearest_terms:
if t == 'w':
hits = get_nbhits_in_bibwords(term, f)
else:
if index_id:
hits = get_nbhits_in_idxphrases(term, f)
elif f == 'datecreated' or f == 'datemodified':
hits = get_nbhits_in_bibrec(term, f)
else:
hits = get_nbhits_in_bibxxx(term, f)
argd = {}
argd.update(urlargd)
# check which fields contained the requested parameter, and replace it.
for px, dummy_fx in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
if px in argd:
argd_px = argd[px]
if t == 'w':
# p was stripped of accents, so do the same:
argd_px = strip_accents(argd_px)
#argd[px] = string.replace(argd_px, p, term, 1)
#we need something similar, but case insensitive
pattern_index = string.find(argd_px.lower(), p.lower())
if pattern_index > -1:
argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
break
#this is doing exactly the same as:
#argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
#but is ~4x faster (2us vs. 8.25us)
terminfo.append((term, hits, argd))
intro = ""
if intro_text_p: # add full leading introductory text
if f:
intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
{'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
else:
intro = _("Search term %s did not match any record. Nearest terms in any collection are:") % \
("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>")
return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
"""Return list of +n -n nearest terms to word `p' in index for field `f'."""
nearest_words = [] # will hold the (sorted) list of nearest words to return
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return nearest_words
# firstly try to get `n' closest words above `p':
res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
(p, n_above))
for row in res:
nearest_words.append(row[0])
nearest_words.reverse()
# secondly insert given word `p':
nearest_words.append(p)
# finally try to get `n' closest words below `p':
res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
(p, n_below))
for row in res:
nearest_words.append(row[0])
return nearest_words
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
regardless of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
return [p]
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))
res_above = [x[0] for x in res_above]
res_above.reverse()
res_below = run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))
res_below = [x[0] for x in res_below]
return res_above + res_below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
considering the collection (intbitset).
Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3))
res_above = [(term, intbitset(hitlist) & collection) for term, hitlist in res_above]
res_above = [(term, len(hitlist)) for term, hitlist in res_above if hitlist]
res_below = run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3))
res_below = [(term, intbitset(hitlist) & collection) for term, hitlist in res_below]
res_below = [(term, len(hitlist)) for term, hitlist in res_below if hitlist]
res_above.reverse()
return res_above[-n_above:] + res_below[:n_below]
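# Illustrative sketch for get_nearest_terms_in_idxphrase_with_collection()
# above.  The index_id, phrases and hit counts are placeholders; only terms
# with at least one hit inside the given collection intbitset are kept:
#   get_nearest_terms_in_idxphrase_with_collection('quark', 3, 2, 2, coll_hitset)
#   -> [('quantum gravity', 12), ('quantum mechanics', 57),
#       ('quark', 231), ('quark-gluon plasma', 18)]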
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field f, regardless
of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
## We are going to take max(n_below, n_above) as the number of
    ## values to fetch from bibXXx. This is needed to work around
## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to
## use MySQL 4.1.x or our own idxPHRASE in the future.
index_id = get_index_id_from_field(f)
if index_id:
return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
n_fetch = 2*max(n_below, n_above)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts with two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
## start browsing to fetch list of hits:
browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
# always add self to the results set:
browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
# firstly try to get `n' closest phrases above `p':
if len(t) != 6 or t[-1:] == '%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag LIKE %%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag=%%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# secondly try to get `n' closest phrases equal to or below `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag LIKE %%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag=%%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# select first n words only: (this is needed as we were searching
# in many different tables and so aren't sure we have more than n
# words right; this of course won't be needed when we shall have
# one ACC table only for given field):
phrases_out = browsed_phrases.keys()
phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
string.lower(strip_accents(y))))
# find position of self:
try:
idx_p = phrases_out.index(p)
except ValueError:
idx_p = len(phrases_out)/2
# return n_above and n_below:
return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
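# Illustrative sketch for get_nearest_terms_in_bibxxx() above.  The phrase
# values are hypothetical and come from whatever is stored in the bibXXx
# tables for the MARC tags associated with the given field:
#   get_nearest_terms_in_bibxxx('Higgs boson', 'title', 2, 2)
#   -> a short alphabetical slice of title phrases around 'Higgs boson',
#      e.g. ['Hidden sectors', 'Higgs boson', 'Higgs searches', 'High energy physics']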
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
"""Return list of nearest terms and counts from bibrec table.
p is usually a date, and f either datecreated or datemodified.
    Note: the below/above counts are only approximate and not strictly respected.
"""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s < %%s
ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
(p, n_above))
res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s > %%s
ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
(p, n_below))
out = set([])
for row in res_above:
out.add(row[0])
for row in res_below:
out.add(row[0])
out_list = list(out)
out_list.sort()
return list(out_list)
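# Illustrative sketch for get_nearest_terms_in_bibrec() above.  The dates are
# hypothetical; they are read from bibrec.creation_date / modification_date:
#   get_nearest_terms_in_bibrec('2011-03-01', 'datecreated', 3, 3)
#   -> ['2011-02-27 14:03:12', '2011-02-28 09:55:41', '2011-03-02 08:12:00', ...]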
def get_nbhits_in_bibrec(term, f):
"""Return number of hits in bibrec table. term is usually a date,
and f is either 'datecreated' or 'datemodified'."""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
(term + '%',))
return res[0][0]
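# Note that the LIKE '<term>%' pattern above makes the count prefix-based, so
# a partial date counts every record created (or modified) in that period:
#   get_nbhits_in_bibrec('2011-03', 'datecreated')   # records created in March 2011
#   get_nbhits_in_bibrec('2011', 'datemodified')     # records modified during 2011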
def get_nbhits_in_bibwords(word, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_idxphrases(word, f):
"""Return number of hits for word 'word' inside phrase index for field 'f'."""
out = 0
    # deduce into which idxPHRASEX table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_bibxxx(p, f, in_hitset=None):
"""Return number of hits for word 'word' inside words index for field 'f'."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nbhits_in_bibwords(p, f)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts with two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
# start searching:
recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag LIKE %%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t + "%"))
else:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag=%%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t))
for row in res:
recIDs[row[0]] = 1
if in_hitset is None:
nbhits = len(recIDs)
else:
nbhits = len(intbitset(recIDs.keys()).intersection(in_hitset))
return nbhits
def get_mysql_recid_from_aleph_sysno(sysno):
"""Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
Returns None in case of failure."""
out = None
res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
(sysno,))
if res:
out = res[0][0]
return out
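# Illustrative sketch (both the sysno and the returned recID are placeholders
# and depend on the local bib97x content):
#   get_mysql_recid_from_aleph_sysno("002379334CER")  ->  e.g. 1234, or None if unknown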
def guess_primary_collection_of_a_record(recID):
"""Return primary collection name a record recid belongs to, by
testing 980 identifier.
May lead to bad guesses when a collection is defined dynamically
via dbquery.
In that case, return 'CFG_SITE_NAME'."""
out = CFG_SITE_NAME
dbcollids = get_fieldvalues(recID, "980__a")
for dbcollid in dbcollids:
variants = ("collection:" + dbcollid,
'collection:"' + dbcollid + '"',
"980__a:" + dbcollid,
'980__a:"' + dbcollid + '"',
'980:' + dbcollid ,
'980:"' + dbcollid + '"')
res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
if res:
out = res[0][0]
break
if CFG_CERN_SITE:
recID = int(recID)
# dirty hack for ATLAS collections at CERN:
if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
for alternative_collection in ('ATLAS Communications Physics',
'ATLAS Communications General',
'ATLAS Internal Notes Physics',
'ATLAS Internal Notes General',):
if recID in get_collection_reclist(alternative_collection):
return alternative_collection
# dirty hack for FP
FP_collections = {'DO': ['Current Price Enquiries', 'Archived Price Enquiries'],
'IT': ['Current Invitation for Tenders', 'Archived Invitation for Tenders'],
'MS': ['Current Market Surveys', 'Archived Market Surveys']}
fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
for coll in fp_coll_ids:
for coll_name in FP_collections[coll]:
if recID in get_collection_reclist(coll_name):
return coll_name
return out
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
"""Return collection name a record recid belongs to, by first testing
the referer URL if provided and otherwise returning the
primary collection."""
if referer:
dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
#requests can come from different invenio installations, with different collections
if CFG_SITE_URL.find(hostname) < 0:
return guess_primary_collection_of_a_record(recID)
g = _re_collection_url.match(path)
if g:
name = urllib.unquote_plus(g.group(1))
#check if this collection actually exist (also normalize the name if case-insensitive)
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name):
return name
elif path.startswith('/search'):
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
query = cgi.parse_qs(query)
for name in query.get('cc', []) + query.get('c', []):
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return name
return guess_primary_collection_of_a_record(recID)
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
"""Return True if the record belongs to at least one collection. This is a
good, although not perfect, indicator to guess if webcoll has already run
after this record has been entered into the system.
"""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return True
return False
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
"""Return all the collection names a record belongs to.
Note this function is O(n_collections)."""
ret = []
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
ret.append(name)
return ret
def get_tag_name(tag_value, prolog="", epilog=""):
"""Return tag name from the known tag value, by looking up the 'tag' table.
Return empty string in case of failure.
       Example: input='100__%', output='first author'."""
out = ""
res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
if res:
out = prolog + res[0][0] + epilog
return out
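# Illustrative sketch (the returned names depend on the local 'tag' table; the
# prolog/epilog strings are only wrapped around a successful lookup):
#   get_tag_name('100__%')                  -> 'first author'
#   get_tag_name('100__%', '<em>', '</em>') -> '<em>first author</em>'
#   get_tag_name('999__%')                  -> ''   (unknown tag value)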
def get_fieldcodes():
"""Returns a list of field codes that may have been passed as 'search options' in URL.
Example: output=['subject','division']."""
out = []
res = run_sql("SELECT DISTINCT(code) FROM field")
for row in res:
out.append(row[0])
return out
def get_field_name(code):
"""Return the corresponding field_name given the field code.
e.g. reportnumber -> report number."""
res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
if res:
return res[0][0]
else:
return ""
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]), "\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
                elif len(tag) >= 5:
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
#check the stuff in hiddenfields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
def get_merged_recid(recID):
""" Return the record ID of the record with
which the given record has been merged.
@param recID: deleted record recID
@type recID: int
@return: merged record recID
@rtype: int or None
"""
merged_recid = None
for val in get_fieldvalues(recID, "970__d"):
try:
merged_recid = int(val)
break
except ValueError:
pass
return merged_recid
def record_empty(recID):
"""
Is this record empty, e.g. has only 001, waiting for integration?
@param recID: the record identifier.
@type recID: int
@return: 1 if the record is empty, 0 otherwise.
@rtype: int
"""
return bibrecord.record_empty(get_record(recID))
def record_public_p(recID, recreate_cache_if_needed=True):
"""Return 1 if the record is public, i.e. if it can be found in the Home collection.
Return 0 otherwise.
"""
return recID in get_collection_reclist(CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
def get_creation_date(recID, fmt="%Y-%m-%d"):
"Returns the creation date of the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_modification_date(recID, fmt="%Y-%m-%d"):
"Returns the date of last modification for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
       If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
       If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc.) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_hosted_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False, em=""):
"""Prints results overview box with links to particular collections below."""
if em != "" and EM_REPOSITORY["overview"] not in em:
return ""
new_colls = []
for coll in colls:
new_colls.append({
'id': get_colID(coll),
'code': coll,
'name': get_coll_i18nname(coll, ln, False),
})
return websearch_templates.tmpl_print_results_overview(
ln = ln,
results_final_nb_total = results_final_nb_total,
results_final_nb = results_final_nb,
cpu_time = cpu_time,
colls = new_colls,
ec = ec,
hosted_colls_potential_results_p = hosted_colls_potential_results_p,
)
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, em = ""):
"""Prints the full results of a hosted collection"""
if of.startswith("h"):
if no_records_found:
return "<br />No results found."
if search_timed_out:
return "<br />The search engine did not respond in time."
return websearch_templates.tmpl_print_hosted_results(
url_and_engine=url_and_engine,
ln=ln,
of=of,
req=req,
limit=limit,
display_body = em == "" or EM_REPOSITORY["body"] in em,
display_add_to_basket = em == "" or EM_REPOSITORY["basket"] in em)
class BibSortDataCacher(DataCacher):
"""
Cache holding all structures created by bibsort
( _data, data_dict).
"""
def __init__(self, method_name):
self.method_name = method_name
self.method_id = 0
res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
if res and res[0]:
self.method_id = res[0][0]
else:
self.method_id = 0
def cache_filler():
method_id = self.method_id
alldicts = {}
if self.method_id == 0:
return {}
try:
res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
where id_bsrMETHOD = %s""", (method_id,))
res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
where id_bsrMETHOD = %s""", (method_id,))
except Exception:
# database problems, return empty cache
return {}
try:
data_dict_ordered = deserialize_via_marshal(res_data[0][0])
except IndexError:
data_dict_ordered = {}
alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
if not res_buckets:
alldicts['bucket_data'] = {}
return alldicts
for row in res_buckets:
bucket_no = row[0]
try:
bucket_data = intbitset(row[1])
except IndexError:
bucket_data = intbitset([])
alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
return alldicts
def timestamp_verifier():
method_id = self.method_id
res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_methoddata = str(res[0][0])
except IndexError:
update_time_methoddata = '1970-01-01 00:00:00'
res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_buckets = str(res[0][0])
except IndexError:
update_time_buckets = '1970-01-01 00:00:00'
return max(update_time_methoddata, update_time_buckets)
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def get_sorting_methods():
res = run_sql("""SELECT m.name, m.definition
FROM bsrMETHOD m, bsrMETHODDATA md
WHERE m.id = md.id_bsrMETHOD""")
return dict(res)
SORTING_METHODS = get_sorting_methods()
CACHE_SORTED_DATA = {}
for sorting_method in SORTING_METHODS:
try:
CACHE_SORTED_DATA[sorting_method].is_ok_p
except KeyError:
CACHE_SORTED_DATA[sorting_method] = BibSortDataCacher(sorting_method)
def get_tags_from_sort_fields(sort_fields):
"""Given a list of sort_fields, return the tags associated with it and
also the name of the field that has no tags associated, to be able to
display a message to the user."""
tags = []
if not sort_fields:
return [], ''
for sort_field in sort_fields:
if sort_field and (len(sort_field) > 1 and str(sort_field[0:2]).isdigit()):
# sort_field starts by two digits, so this is probably a MARC tag already
tags.append(sort_field)
else:
# let us check the 'field' table
field_tags = get_field_tags(sort_field)
if field_tags:
tags.extend(field_tags)
else:
return [], sort_field
return tags, ''
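# Illustrative sketch for get_tags_from_sort_fields() above.  The returned
# MARC tags are placeholders and come from the local 'field'/'tag' configuration:
#   get_tags_from_sort_fields(['author', '773__c'])  -> (['100__a', '700__a', '773__c'], '')
#   get_tags_from_sort_fields(['nosuchfield'])       -> ([], 'nosuchfield')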
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, field='', sorting_methods=SORTING_METHODS):
"""Initial entry point for ranking records, acts like a dispatcher.
(i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
       (ii) rank_method_code is not in bsrMETHOD, use bibrank;
"""
# Special case: sorting by citations is fast because we store the
# ranking dictionary in memory, so we do not use bibsort buckets.
if CFG_BIBSORT_ENABLED and sorting_methods and rank_method_code != 'citation':
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('RNK') and \
definition.replace('RNK:', '').strip().lower() == rank_method_code.lower():
solution_recs, solution_scores = \
sort_records_bibsort(req, hitset_global, sort_method,
'', sort_order, verbose, of, ln,
rg, jrec, 'r')
comment = ''
if verbose > 0:
comment = 'find_citations retlist %s' % [[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
return solution_recs, solution_scores, '(', ')', comment
if rank_method_code.lower() == 'citation':
related_to = []
else:
related_to = pattern
solution_recs, solution_scores, prefix, suffix, comment = \
rank_records_bibrank(rank_method_code=rank_method_code,
rank_limit_relevance=rank_limit_relevance,
hitset=hitset_global,
verbose=verbose,
field=field,
related_to=related_to,
rg=rg,
jrec=jrec)
# Solution recs can be None, in case of error or other cases
    # which should all be changed to return an empty list.
if solution_recs and sort_order == 'd':
solution_recs.reverse()
solution_scores.reverse()
return solution_recs, solution_scores, prefix, suffix, comment
def sort_records_latest(recIDs, jrec, rg, sort_order):
if sort_order == 'd':
recIDs.reverse()
return slice_records(recIDs, jrec, rg)
def sort_records(req, recIDs, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sorting_methods=SORTING_METHODS):
"""Initial entry point for sorting records, acts like a dispatcher.
(i) sort_field is in the bsrMETHOD, and thus, the BibSort has sorted the data for this field, so we can use the cache;
       (ii) sort_field is not in bsrMETHOD, and thus the cache does not contain any information regarding this sorting method"""
_ = gettext_set_language(ln)
#bibsort does not handle sort_pattern for now, use bibxxx
if sort_pattern:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
    #ignore the use of buckets, use old-fashioned sorting
use_sorting_buckets = CFG_BIBSORT_ENABLED and sorting_methods
if not sort_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
return sort_records_latest(recIDs, jrec, rg, sort_order)
sort_fields = sort_field.split(",")
if len(sort_fields) == 1:
# we have only one sorting_field, check if it is treated by BibSort
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if use_sorting_buckets and \
((definition.startswith('FIELD') and
definition.replace('FIELD:', '').strip().lower() == sort_fields[0].lower()) or
sort_method == sort_fields[0]):
#use BibSort
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#deduce sorting MARC tag out of the 'sort_field' argument:
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
if of.startswith('h'):
write_warning(_("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error", req=req)
return slice_records(recIDs, jrec, rg)
elif tags:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('MARC') \
and definition.replace('MARC:', '').strip().split(',') == tags \
and use_sorting_buckets:
                    #this list of tags has a designated method in BibSort, so use it
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
            #we do not have this sort_field in the BibSort tables -> do the old-fashioned sorting
return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
else:
return slice_records(recIDs, jrec, rg)
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=1, sort_or_rank='s', sorting_methods=SORTING_METHODS):
"""This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed"""
_ = gettext_set_language(ln)
if not jrec:
jrec = 1
#sanity check
if sort_method not in sorting_methods:
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting (using BibSort cache) by method %s (definition %s)."
% (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))), req=req)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
solution = intbitset()
input_recids = intbitset(recIDs)
CACHE_SORTED_DATA[sort_method].recreate_cache_if_needed()
sort_cache = CACHE_SORTED_DATA[sort_method].cache
bucket_numbers = sort_cache['bucket_data'].keys()
#check if all buckets have been constructed
if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
if verbose > 3 and of.startswith('h'):
write_warning("Not all buckets have been constructed.. switching to old fashion sorting.", req=req)
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field,
sort_order, '', verbose, of, ln, rg,
jrec)
if sort_order == 'd':
bucket_numbers.reverse()
for bucket_no in bucket_numbers:
solution.union_update(input_recids & sort_cache['bucket_data'][bucket_no])
if len(solution) >= irec_max:
break
dict_solution = {}
missing_records = intbitset()
for recid in solution:
try:
dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
except KeyError:
#recid is in buckets, but not in the bsrMETHODDATA,
#maybe because the value has been deleted, but the change has not yet been propagated to the buckets
missing_records.add(recid)
#check if there are recids that are not in any bucket -> to be added at the end/top, ordered by insertion date
if len(solution) < irec_max:
#some records have not been yet inserted in the bibsort structures
#or, some records have no value for the sort_method
missing_records += input_recids - solution
#the records need to be sorted in reverse order for the print record function
    #the return statement should be equivalent to the following statements
    #(these are clearer, but less efficient, since they reverse the same list twice)
#sorted_solution = (missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='d'))[:irec_max]
#sorted_solution.reverse()
#return sorted_solution
reverse = sort_order == 'd'
if sort_method.strip().lower().startswith('latest') and reverse:
# If we want to sort the records on their insertion date, add the missing records at the top
solution = sorted(dict_solution, key=dict_solution.__getitem__, reverse=True) + sorted(missing_records, reverse=True)
else:
solution = sorted(missing_records) + sorted(dict_solution, key=dict_solution.__getitem__, reverse=reverse)
    # Only keep the records we are going to display
index_min = jrec - 1
if rg:
index_max = index_min + rg
solution = solution[index_min:index_max]
else:
solution = solution[index_min:]
if sort_or_rank == 'r':
# We need the recids, with their ranking score
return solution, [dict_solution.get(record, 0) for record in solution]
else:
return solution
def slice_records(recIDs, jrec, rg):
if not jrec:
jrec = 1
if rg:
recIDs = recIDs[jrec-1:jrec-1+rg]
else:
recIDs = recIDs[jrec-1:]
return recIDs
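# slice_records() is a pure helper, so its behaviour can be shown directly
# (jrec is the 1-based position of the first record to keep, rg the page size):
#   slice_records([10, 20, 30, 40, 50], jrec=2, rg=2)    -> [20, 30]
#   slice_records([10, 20, 30, 40, 50], jrec=4, rg=None) -> [40, 50]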
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
'sort pattern', for example "sort by report number that starts by CERN-PS".
Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
_ = gettext_set_language(ln)
## check arguments:
if not sort_field:
return slice_records(recIDs, jrec, rg)
if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
if of.startswith('h'):
write_warning(_("Sorry, sorting is allowed on sets of up to %d records only. Using default sort order.") % CFG_WEBSEARCH_NB_RECORDS_TO_SORT, "Warning", req=req)
return slice_records(recIDs, jrec, rg)
recIDs_dict = {}
recIDs_out = []
if not tags:
        # tags have not been computed yet
sort_fields = sort_field.split(',')
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if of.startswith('h'):
write_warning(_("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error", req=req)
return slice_records(recIDs, jrec, rg)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting by tags %s." % cgi.escape(repr(tags)), req=req)
if sort_pattern:
write_warning("Sorting preferentially by %s." % cgi.escape(sort_pattern), req=req)
## check if we have sorting tag defined:
if tags:
# fetch the necessary field values:
for recID in recIDs:
val = "" # will hold value for recID according to which sort
vals = [] # will hold all values found in sorting tag for recID
for tag in tags:
if CFG_CERN_SITE and tag == '773__c':
# CERN hack: journal sorting
                    # 773__c contains page numbers, e.g. 3-13, and we want to sort numerically by the first page number (i.e. 3):
vals.extend(["%050s" % x.split("-", 1)[0] for x in get_fieldvalues(recID, tag)])
else:
vals.extend(get_fieldvalues(recID, tag))
if sort_pattern:
# try to pick that tag value that corresponds to sort pattern
bingo = 0
for v in vals:
if v.lower().startswith(sort_pattern.lower()): # bingo!
bingo = 1
val = v
break
if not bingo: # sort_pattern not present, so add other vals after spaces
val = sort_pattern + " " + ''.join(vals)
else:
# no sort pattern defined, so join them all together
val = ''.join(vals)
val = strip_accents(val.lower()) # sort values regardless of accents and case
if val in recIDs_dict:
recIDs_dict[val].append(recID)
else:
recIDs_dict[val] = [recID]
# create output array:
for k in sorted(recIDs_dict.keys()):
recIDs_out.extend(recIDs_dict[k])
# ascending or descending?
if sort_order == 'd':
recIDs_out.reverse()
recIDs = recIDs_out
# return only up to the maximum that we need
return slice_records(recIDs, jrec, rg)
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
"""calculates in which interval should the sorted records be
a value of 'rg=-9999' means to print all records: to be used with care."""
if not jrec:
jrec = 1
if not rg:
#return all
return jrec-1, nb_found
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will sort records from irec_min to irec_max excluded
irec_min = jrec - 1
irec_max = irec_min + rg
if irec_min < 0:
irec_min = 0
if irec_max > nb_found:
irec_max = nb_found
return irec_min, irec_max
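# Pure helper, so a few example values (irec_min is inclusive, irec_max exclusive):
#   get_interval_for_records_to_sort(100, jrec=11, rg=10) -> (10, 20)
#   get_interval_for_records_to_sort(100)                 -> (0, 100)   # no rg: return all
#   get_interval_for_records_to_sort(100, jrec=95, rg=10) -> (94, 100)  # clamped at nb_found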
def print_records(req, recIDs, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, format='hb', ot='', ln=CFG_SITE_LANG,
relevances=[], relevances_prologue="(", relevances_epilogue="%%)",
decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True,
print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='',
rm='', em='', nb_found=-1):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
    'print_records_prologue_p' and/or 'print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if em != "" and EM_REPOSITORY["body"] not in em:
return
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if nb_found == -1:
nb_found = len(recIDs)
if nb_found:
if not rg or rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if len(recIDs) > rg and rg != -9999:
recIDs = slice_records(recIDs, jrec, rg)
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
if ot:
# asked to print some filtered fields only, so call print_record() on the fly:
for recid in recIDs:
x = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(x)
if x:
req.write('\n')
else:
format_records(recIDs,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for recid in recIDs:
x = print_record(recid, format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format.startswith('recjson'):
# we are doing recjson output:
req.write('[')
for idx, recid in enumerate(recIDs):
if idx > 0:
req.write(',')
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
req.write(']')
elif format == 'excel':
create_excel(recIDs=recIDs, req=req, ot=ot, user_info=user_info)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for recid in recIDs:
req.write(print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
if em != "" and EM_REPOSITORY["basket"] not in em:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(ln=ln))
for irec, recid in enumerate(recIDs):
row_number = jrec+irec
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln=ln,
recid=recid,
row_number=row_number,
relevance=relevance,
record=record,
relevances_prologue=relevances_prologue,
relevances_epilogue=relevances_epilogue,
display_add_to_basket=display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln=ln,
display_add_to_basket=display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
referer = user_info.get('referer', '')
for recid in recIDs:
if record_exists(recid) == -1:
write_warning(_("The record has been deleted."), req=req)
merged_recid = get_merged_recid(recid)
if merged_recid:
write_warning(_("The record %d replaces it." % merged_recid), req=req)
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_collection_of_a_record(recid, referer, False)),
recid, ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'],
'%s/%s/%s/%s%s' % (CFG_BASE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln),
tab_id == tab,
unordered_tabs[tab_id]['enabled'])
for (tab_id, dummy_order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] is True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recid, "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recid, ln)
r = calculate_reading_similarity_list(recid, "pageviews")
viewsimilarity = None
if r:
viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recid,
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'citations':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
                        selfcited = rank_by_citations(get_self_cited_by(recid), verbose=verbose)
                        selfcited = reversed(selfcited[0])
                        # use a distinct loop variable so that the enclosing `recid' is not overwritten:
                        selfcited = [selfcited_recid for selfcited_recid, dummy in selfcited]
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
write_warning("Citation graph debug: " +
str(len(citationhistory)), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(ln, citationhistory))
# Citation log
entries = get_citers_log(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_log(ln, entries))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recid, 'HDREF', ln=ln, user_info=user_info, verbose=verbose, force_2nd_pass=True))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'keywords':
from invenio.bibclassify_webinterface import main_page
main_page(req, recid, tabs, ln,
webstyle_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recid,
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'hepdata':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
include_jquery=True,
include_mathjax=True))
from invenio import hepdatautils
from invenio import hepdatadisplayutils
data = hepdatautils.retrieve_data_for_record(recid)
if data:
content = websearch_templates.tmpl_record_hepdata(data, recid, True)
else:
content = websearch_templates.tmpl_record_no_hepdata()
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(
recid,
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recid) == 1:
creationdate = get_creation_date(recid)
modificationdate = get_modification_date(recid)
content = print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID=recid,
ln=ln,
format=format,
creationdate=creationdate,
modificationdate=modificationdate,
content=content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recid,
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.webcomment import get_mini_reviews
reviews = get_mini_reviews(recid=recid, ln=ln)
else:
reviews = ''
actions = format_record(recid, 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recid, 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recid,
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for recid in recIDs:
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
write_warning(_("Use different search terms."), req=req)
def print_records_prologue(req, format, cc=None):
"""
Print the appropriate prologue for list of records in the given
format.
"""
prologue = "" # no prologue needed for HTML or Text formats
if format.startswith('xm'):
prologue = websearch_templates.tmpl_xml_marc_prologue()
elif format.startswith('xn'):
prologue = websearch_templates.tmpl_xml_nlm_prologue()
elif format.startswith('xw'):
prologue = websearch_templates.tmpl_xml_refworks_prologue()
elif format.startswith('xr'):
prologue = websearch_templates.tmpl_xml_rss_prologue(cc=cc)
elif format.startswith('xe8x'):
prologue = websearch_templates.tmpl_xml_endnote_8x_prologue()
elif format.startswith('xe'):
prologue = websearch_templates.tmpl_xml_endnote_prologue()
elif format.startswith('xo'):
prologue = websearch_templates.tmpl_xml_mods_prologue()
elif format.startswith('xp'):
prologue = websearch_templates.tmpl_xml_podcast_prologue(cc=cc)
elif format.startswith('x'):
prologue = websearch_templates.tmpl_xml_default_prologue()
req.write(prologue)
def print_records_epilogue(req, format):
"""
Print the appropriate epilogue for list of records in the given
format.
"""
epilogue = "" # no epilogue needed for HTML or Text formats
if format.startswith('xm'):
epilogue = websearch_templates.tmpl_xml_marc_epilogue()
elif format.startswith('xn'):
epilogue = websearch_templates.tmpl_xml_nlm_epilogue()
elif format.startswith('xw'):
epilogue = websearch_templates.tmpl_xml_refworks_epilogue()
elif format.startswith('xr'):
epilogue = websearch_templates.tmpl_xml_rss_epilogue()
elif format.startswith('xe8x'):
epilogue = websearch_templates.tmpl_xml_endnote_8x_epilogue()
elif format.startswith('xe'):
epilogue = websearch_templates.tmpl_xml_endnote_epilogue()
elif format.startswith('xo'):
epilogue = websearch_templates.tmpl_xml_mods_epilogue()
elif format.startswith('xp'):
epilogue = websearch_templates.tmpl_xml_podcast_epilogue()
elif format.startswith('x'):
epilogue = websearch_templates.tmpl_xml_default_epilogue()
req.write(epilogue)
def get_record(recid):
"""Directly the record object corresponding to the recid."""
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
if value:
try:
val = value[0][0]
except IndexError:
### In case it does not exist, let's build it!
pass
else:
return deserialize_via_marshal(val)
return create_record(print_record(recid, 'xm'))[0]
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
search_pattern=None, user_info=None, verbose=0, sf='', so='d', sp='', rm=''):
"""
Prints record 'recID' formatted according to 'format'.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if format == 'recstruct':
return get_record(recID)
#check from user information if the user has the right to see hidden fields/tags in the
#records as well
can_see_hidden = False
if user_info:
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if format == 'recjson':
import json
from invenio.bibfield import get_record as get_recjson
from invenio.bibfield_utils import SmartDict
recjson = get_recjson(recID)
record = SmartDict()
keys = ot or recjson.keys()
for key in keys:
if key == 'bibdocs':
continue
if not can_see_hidden and key in CFG_BIBFORMAT_HIDDEN_RECJSON_FIELDS:
continue
record[key] = recjson.get(key)
# skipkeys is True to skip e.g. the bibdocs key, which is a non
# primitive object.
return json.dumps(dict(record), skipkeys=True)
_ = gettext_set_language(ln)
# The 'attribute this paper' link is shown only if the session states it should and
# the record is included in the collections to which bibauthorid is limited.
if user_info:
display_claim_this_paper = (user_info.get("precached_viewclaimlink", False) and
recID in intbitset.union(*[get_collection_reclist(x)
for x in BIBAUTHORID_LIMIT_TO_COLLECTIONS]))
else:
display_claim_this_paper = False
can_edit_record = False
if check_user_can_edit_record(user_info, recID):
can_edit_record = True
out = ""
# sanity check:
record_exist_p = record_exists(recID)
if record_exist_p == 0: # doesn't exist
return out
# We must still check some special formats, but these
# should disappear when BibFormat improves.
if not (format.lower().startswith('t')
or format.lower().startswith('hm')
or str(format[0:3]).isdigit()
or ot):
# Unspecified format is hd
if format == '':
format = 'hd'
if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
# HTML output displays a default value for deleted records.
# Other format have to deal with it.
out += _("The record has been deleted.")
# was record deleted-but-merged ?
merged_recid = get_merged_recid(recID)
if merged_recid:
out += ' ' + _("The record %d replaces it." % merged_recid)
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format.lower().startswith('hb') and \
format.lower() != 'hb_p':
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
return out
if format == "marcxml" or format == "oai_dc":
out += " <record>\n"
out += " <header>\n"
for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
out += " <identifier>%s</identifier>\n" % oai_id
out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
out += " </header>\n"
out += " <metadata>\n"
if format.startswith("xm") or format == "marcxml":
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res and record_exist_p == 1 and not ot:
# record 'recID' is formatted in 'format', and we are not
# asking for field-filtered output; so print it:
out += "%s" % decompress(res[0][0])
elif ot:
# field-filtered output was asked for; print only some fields
record = get_record(recID)
if not can_see_hidden:
for tag in CFG_BIBFORMAT_HIDDEN_TAGS:
del record[tag]
ot = list(set(ot) - set(CFG_BIBFORMAT_HIDDEN_TAGS))
out += record_xml_output(record, ot)
else:
# record 'recID' is not formatted in 'format' or we ask
# for field-filtered output -- they are not in "bibfmt"
# table; so fetch all the data from "bibXXx" tables:
if format == "marcxml":
out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
elif format.startswith("xm"):
out += """ <record>\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
if record_exist_p == -1:
# deleted record, so display only OAI ID and 980:
oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
if oai_ids:
out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
(CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
else:
# controlfields
query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
"WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
"ORDER BY bb.field_number, b.tag ASC"
res = run_sql(query, (recID, ))
for row in res:
field, value = row[0], row[1]
value = encode_for_xml(value)
out += """ <controlfield tag="%s">%s</controlfield>\n""" % \
(encode_for_xml(field[0:3]), value)
# datafields
i = 1 # Do not process bib00x and bibrec_bib00x, as
# they are controlfields. So start at bib01x and
                      # bibrec_bib01x (and set i = 0 at the end of
# first loop)
for digit1 in range(0, 10):
for digit2 in range(i, 10):
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
if ind1 == "_" or ind1 == "":
ind1 = " "
if ind2 == "_" or ind2 == "":
ind2 = " "
# print field tag, unless hidden
printme = True
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if field_number_old != -999:
out += """ </datafield>\n"""
out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
(encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
field_number_old = field_number
field_old = field
# print subfield value
value = encode_for_xml(value)
out += """ <subfield code="%s">%s</subfield>\n""" % \
(encode_for_xml(field[-1:]), value)
# all fields/subfields printed in this run, so close the tag:
if field_number_old != -999:
out += """ </datafield>\n"""
                    i = 0 # next iterations of the outer loop start again at digit2 = 0 (bib10x, bib20x, ...)
# we are at the end of printing the record:
out += " </record>\n"
elif format == "xd" or format == "oai_dc":
# XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://purl.org/dc/elements/1.1/
http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
if record_exist_p == -1:
out += ""
else:
for f in get_fieldvalues(recID, "041__a"):
out += " <language>%s</language>\n" % f
for f in get_fieldvalues(recID, "100__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "700__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "245__a"):
out += " <title>%s</title>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "65017a"):
out += " <subject>%s</subject>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "8564_u"):
if f.split('.')[-1] == 'png':  # skip links to PNG images
continue
out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "520__a"):
out += " <description>%s</description>\n" % encode_for_xml(f)
out += " <date>%s</date>\n" % get_creation_date(recID)
out += " </dc>\n"
elif len(format) == 6 and str(format[0:3]).isdigit():
# user has asked to print some fields only
if format == "001":
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
else:
vals = get_fieldvalues(recID, format)
for val in vals:
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
elif format.startswith('t'):
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
else:
out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
elif format == "hm":
if record_exist_p == -1:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
else:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
elif format.startswith("h") and ot:
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
else:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
elif format == "hd":
# HTML detailed format
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_detailed(
ln = ln,
recID = recID,
)
elif format.startswith("hb_") or format.startswith("hd_"):
# underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hx"):
# BibTeX format, called on the fly:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hs"):
# for citation/download similarity navigation links:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
# firstly, title:
titles = get_fieldvalues(recID, "245__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# just print record ID:
out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
out += "</a>"
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += " - %s" % authors[0]
if len(authors) > 1:
out += " <em>et al</em>"
# thirdly publication info:
publinfos = get_fieldvalues(recID, "773__s")
if not publinfos:
publinfos = get_fieldvalues(recID, "909C4s")
if not publinfos:
publinfos = get_fieldvalues(recID, "037__a")
if not publinfos:
publinfos = get_fieldvalues(recID, "088__a")
if publinfos:
out += " - %s" % publinfos[0]
else:
# fourthly publication year (if not publication info):
years = get_fieldvalues(recID, "773__y")
if not years:
years = get_fieldvalues(recID, "909C4y")
if not years:
years = get_fieldvalues(recID, "260__c")
if years:
out += " (%s)" % years[0]
else:
# HTML brief format by default
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format))
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
if CFG_WEBSEARCH_CALL_BIBFORMAT:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
pass # do nothing for portfolio and on-the-fly formats
else:
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
# print record closing tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " </metadata>\n"
out += " </record>\n"
return out
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
"""
Calls BibFormat and returns formatted record.
BibFormat will decide by itself if old or new BibFormat must be used.
"""
from invenio.bibformat_utils import get_pdf_snippets
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, str(search_pattern), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
# remove leading and trailing `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
out = format_record(recID,
of=format,
ln=ln,
search_pattern=keywords,
user_info=user_info,
verbose=verbose)
if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
'fulltext' in user_info['uri'].lower():
# check snippets only if URL contains fulltext
# FIXME: make it work for CLI too, via new function arg
if keywords:
snippets = ''
try:
snippets = get_pdf_snippets(recID, keywords, user_info)
except:
register_exception()
if snippets:
out += snippets
return out
def log_query(hostname, query_args, uid=-1):
"""
Log query into the query and user_query tables.
Return id_query or None in case of problems.
"""
id_query = None
if uid >= 0:
# log the query only if uid is reasonable
res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
try:
id_query = res[0][0]
except IndexError:
id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
if id_query:
run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
(uid, id_query, hostname,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
"""Write some info to the log file for later analysis."""
try:
log = open(CFG_LOGDIR + "/search.log", "a")
log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
log.write(action+"#")
log.write(p+"#")
log.write(f+"#")
for coll in colls[:-1]:
log.write("%s," % coll)
log.write("%s#" % colls[-1])
log.write("%d" % nb_records_found_total)
log.write("\n")
log.close()
except:
pass
return
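# An entry written to search.log by the function above looks like
# (illustrative values):
#   20240101123456#ss#ellis#author#Theses,Books#42
# i.e. timestamp#action#pattern#field#collections#number_of_records_found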
def clean_dictionary(dictionary, list_of_items):
"""Returns a copy of the dictionary with all the items
in the list_of_items as empty strings"""
out_dictionary = dictionary.copy()
out_dictionary.update((item, '') for item in list_of_items)
return out_dictionary
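# Illustrative example (hypothetical values):
#   clean_dictionary({'p1': 'ellis', 'f1': 'author', 'p2': 'muon'}, ['p2', 'f2'])
#   # -> {'p1': 'ellis', 'f1': 'author', 'p2': '', 'f2': ''}
# (keys listed in list_of_items but missing from the input dictionary are
# added with empty-string values, as dict.update() does)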
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=None, sf="", so="a", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="",
wl=0, em=""):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed. (Note that `rg' is ignored in case of `of=id'.)
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found, "intbitset" means to return an intbitset
representation of the recIDs found (no sorting or ranking
will be performed). (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. for library to control some fields.
em - output only part of the page.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
or split by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results. (Note that `jrec' is ignored
in case of `of=id'.)
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
d1 - first datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
d2 - second datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
wl - wildcard limit (e.g. 100): wildcard queries will be
limited to at most 100 results
"""
kwargs = prs_wash_arguments(req=req, cc=cc, c=c, p=p, f=f, rg=rg, sf=sf, so=so, sp=sp, rm=rm, of=of, ot=ot, aas=aas,
p1=p1, f1=f1, m1=m1, op1=op1, p2=p2, f2=f2, m2=m2, op2=op2, p3=p3, f3=f3, m3=m3, sc=sc, jrec=jrec,
recid=recid, recidb=recidb, sysno=sysno, id=id, idb=idb, sysnb=sysnb, action=action, d1=d1,
d1y=d1y, d1m=d1m, d1d=d1d, d2=d2, d2y=d2y, d2m=d2m, d2d=d2d, dt=dt, verbose=verbose, ap=ap, ln=ln, ec=ec,
tab=tab, wl=wl, em=em)
return prs_perform_search(kwargs=kwargs, **kwargs)
def prs_perform_search(kwargs=None, **dummy):
"""Internal call which does the search, it is calling standard Invenio;
Unless you know what you are doing, don't use this call as an API
"""
# separately because we can call it independently
out = prs_wash_arguments_colls(kwargs=kwargs, **kwargs)
if not out:
return out
return prs_search(kwargs=kwargs, **kwargs)
def prs_wash_arguments_colls(kwargs=None, of=None, req=None, cc=None, c=None, sc=None, verbose=None,
aas=None, ln=None, em="", **dummy):
"""
Check and wash collection list argument before we start searching.
If there are troubles, e.g. a collection is not defined, print
warning to the browser.
@return: True if collection list is OK, and various False values
(empty string, empty list) if there was an error.
"""
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
kwargs['colls_to_display'] = colls_to_display
kwargs['colls_to_search'] = colls_to_search
kwargs['hosted_colls'] = hosted_colls
kwargs['wash_colls_debug'] = wash_colls_debug
except InvenioWebSearchUnknownCollectionError, exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
page_end(req, of, ln, em)
return ''
elif of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
page_end(req, of, ln, em)
return ''
else:
page_end(req, of, ln, em)
return ''
return True
def prs_wash_arguments(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="",
sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG,
ec=None, tab="", uid=None, wl=0, em="", **dummy):
"""
Sets the (default) values and checks others for the PRS call
"""
# wash output format:
of = wash_output_format(of, verbose=verbose, req=req)
# wash all arguments requiring special care
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
(d1y, d1m, d1d, d2y, d2m, d2d) = map(int, (d1y, d1m, d1d, d2y, d2m, d2d))
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
# TODO deduce passed search limiting criteria (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, (cStringIO.OutputType, dict)) \
and getattr(req, 'args', None): # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldcode in fieldargs:
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
if uid is None:
try:
uid = getUid(req)
except:
uid = 0
_ = gettext_set_language(ln)
if aas == 2: #add-to-search interface
p = create_add_to_search_pattern(p, p1, f1, m1, op1)
default_addtosearch_args = websearch_templates.restore_search_args_to_default(['p1', 'f1', 'm1', 'op1'])
if req:
req.argd.update(default_addtosearch_args)
req.argd['p'] = p
kwargs = {'req': req, 'cc': cc, 'c': c, 'p': p, 'f': f, 'rg': rg, 'sf': sf,
'so': so, 'sp': sp, 'rm': rm, 'of': of, 'ot': ot, 'aas': aas,
'p1': p1, 'f1': f1, 'm1': m1, 'op1': op1, 'p2': p2, 'f2': f2,
'm2': m2, 'op2': op2, 'p3': p3, 'f3': f3, 'm3': m3, 'sc': sc,
'jrec': jrec, 'recid': recid, 'recidb': recidb, 'sysno': sysno,
'id': id, 'idb': idb, 'sysnb': sysnb, 'action': action, 'd1': d1,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d, 'd2': d2, 'd2y': d2y,
'd2m': d2m, 'd2d': d2d, 'dt': dt, 'verbose': verbose, 'ap': ap,
'ln': ln, 'ec': ec, 'tab': tab, 'wl': wl, 'em': em,
'datetext1': datetext1, 'datetext2': datetext2, 'uid': uid,
'pl': pl, 'pl_in_url': pl_in_url, '_': _,
'selected_external_collections_infos': None,
}
kwargs.update(**dummy)
return kwargs
def prs_search(kwargs=None, recid=0, req=None, cc=None, p=None, p1=None, p2=None, p3=None,
f=None, ec=None, verbose=None, ln=None, selected_external_collections_infos=None,
action=None, rm=None, of=None, em=None,
**dummy):
"""
This function writes various bits into the req object as the search
proceeds (so that pieces of the page are rendered even before the
search has ended)
"""
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
output = prs_detailed_record(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif action == "browse":
## 2 - browse needed
of = 'hb'
output = prs_browse(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
output = prs_search_similar_records(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
## 3-terter - cited by search needed
output = prs_search_cocitedwith(kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3 - common search needed
output = prs_search_common(kwargs=kwargs, **kwargs)
if output is not None:
return output
# External searches
if of.startswith("h"):
if of not in ['hcs', 'hcs2']:
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_detailed_record(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, recid=None, recidb=None,
p=None, verbose=None, tab=None, sf=None, so=None, sp=None, rm=None, ot=None, _=None, em=None,
**dummy):
"""Formats and prints one record"""
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and not req.header_only:
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab, em)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of in ["id", "intbitset"]:
result = [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
if of == "intbitset":
return intbitset(result)
else:
return result
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln,
search_pattern=p, verbose=verbose, tab=tab, sf=sf,
so=so, sp=sp, rm=rm, em=em, nb_found=len(range(recid, recidb)))
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
def prs_browse(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
colls_to_search=None, verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
write_warning(create_exact_author_browse_help_link(p, p1, p2, p3, f, f1, f2, f3,
rm, cc, ln, jrec, rg, aas, action),
req=req)
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_search_similar_records(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, em=None,
verbose=None, **dummy):
if req and not req.header_only:
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
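# the search pattern looks like "recid:1234" here, so strip the "recid:"
# prefix (6 characters) to get the record ID: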
recid = p[6:]
if record_exists(recid) != 1:
# record does not exist
if of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
if of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# the record does exist, so find records similar to it
t1 = os.times()[4]
(results_similar_recIDs,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
results_similar_comments) = \
rank_records_bibrank(rank_method_code=rm,
rank_limit_relevance=0,
hitset=get_collection_reclist(cc),
related_to=[p],
verbose=verbose,
field=f,
rg=rg,
jrec=jrec)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
write_warning(results_similar_comments, req=req)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
elif of == "id":
return results_similar_recIDs
elif of == "intbitset":
return intbitset(results_similar_recIDs)
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning(results_similar_relevances_prologue, req=req)
write_warning(results_similar_relevances_epilogue, req=req)
write_warning(results_similar_comments, req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_cocitedwith(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
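# the search pattern looks like "cocitedwith:1234" here, so strip the
# "cocitedwith:" prefix (12 characters) to get the record ID: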
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# the record does exist, so find co-cited ones:
t1 = os.times()[4]
results_cocited_recIDs = [x[0] for x in calculate_co_cited_with_list(int(recID))]
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
elif of == "id":
return results_cocited_recIDs
elif of == "intbitset":
return intbitset(results_cocited_recIDs)
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
else:
# cited rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning("nothing found", req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_hosted_collections(kwargs=None, req=None, of=None, ln=None, _=None, p=None,
p1=None, p2=None, p3=None, hosted_colls=None, f=None,
colls_to_search=None, hosted_colls_actual_or_potential_results_p=None,
verbose=None, **dummy):
hosted_colls_results = hosted_colls_timeouts = hosted_colls_true_results = None
# search into the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] is None or result[1] is False:
# these are the searches that returned no or zero results
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned no results" % result[0][1].name, req=req)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]), req=req)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time", req=req)
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
write_warning("Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name, req=req)
# we need to know for later use if there were any hosted collections to be searched even if they weren't in the end
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections to be searched", req=req)
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
kwargs['hosted_colls_actual_or_potential_results_p'] = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
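# (the double negation above is equivalent to: hosted_colls and
#  ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))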
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
kwargs['hosted_colls_potential_results_p'] = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
kwargs['only_hosted_colls_actual_or_potential_results_p'] = not colls_to_search and hosted_colls_actual_or_potential_results_p
kwargs['hosted_colls_results'] = hosted_colls_results
kwargs['hosted_colls_timeouts'] = hosted_colls_timeouts
kwargs['hosted_colls_true_results'] = hosted_colls_true_results
def prs_advanced_search(results_in_any_collection, kwargs=None, req=None, of=None,
cc=None, ln=None, _=None, p=None, p1=None, p2=None, p3=None,
f=None, f1=None, m1=None, op1=None, f2=None, m2=None,
op2=None, f3=None, m3=None, ap=None, ec=None,
selected_external_collections_infos=None, verbose=None,
wl=None, em=None, **dummy):
len_results_p1 = 0
len_results_p2 = 0
len_results_p3 = 0
try:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl))
len_results_p1 = len(results_in_any_collection)
if len_results_p1 == 0:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec,
verbose, ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p2 = len(results_tmp)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op1), "Error", req=req)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
if len_results_p2:
#each individual query returned results, but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p3 = len(results_tmp)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op2), "Error", req=req)
if len(results_in_any_collection) == 0 and len_results_p3 and of.startswith("h"):
#each individual query returned results but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
if p2:
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
nearestterms.append((p3, len_results_p3, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p2', 'f2', 'm2'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_simple_search(results_in_any_collection, kwargs=None, req=None, of=None, cc=None, ln=None, p=None, f=None,
p1=None, p2=None, p3=None, ec=None, verbose=None, selected_external_collections_infos=None,
only_hosted_colls_actual_or_potential_results_p=None, query_representation_in_cache=None,
ap=None, hosted_colls_actual_or_potential_results_p=None, wl=None, em=None,
**dummy):
if query_representation_in_cache in search_results_cache.cache:
# query is already in the cache, so reuse the cached results:
results_in_any_collection.union_update(search_results_cache.cache[query_representation_in_cache])
if verbose and of.startswith("h"):
write_warning("Search stage 0: query found in cache, reusing cached results.", req=req)
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln,
display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p,
wl=wl))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_intersect_results_with_collrecs(results_final, results_in_any_collection,
kwargs=None, colls_to_search=None,
req=None, of=None, ln=None,
cc=None, p=None, p1=None, p2=None, p3=None, f=None,
ec=None, verbose=None, selected_external_collections_infos=None,
em=None, **dummy):
display_nearest_terms_box = not kwargs['hosted_colls_actual_or_potential_results_p']
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know since the last stage that we have no results in any collection
if len(results_in_any_collection) != 0:
results_final.update(intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, of,
verbose, ln, display_nearest_terms_box=display_nearest_terms_box))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, req=None, verbose=None, of=None, **dummy):
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE and query_representation_in_cache not in search_results_cache.cache:
if len(search_results_cache.cache) > CFG_WEBSEARCH_SEARCH_CACHE_SIZE:
search_results_cache.clear()
search_results_cache.cache[query_representation_in_cache] = results_in_any_collection
if verbose and of.startswith("h"):
write_warning(req, "Search stage 3: storing query results in cache.", req=req)
def prs_apply_search_limits(results_final, kwargs=None, req=None, of=None, cc=None, ln=None, _=None,
p=None, p1=None, p2=None, p3=None, f=None, pl=None, ap=None, dt=None,
ec=None, selected_external_collections_infos=None,
hosted_colls_actual_or_potential_results_p=None,
datetext1=None, datetext2=None, verbose=None, wl=None, em=None,
**dummy):
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying search pattern limit %s..." % cgi.escape(pl), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_split_into_collections(kwargs=None, results_final=None, colls_to_search=None, hosted_colls_results=None,
cpu_time=0, results_final_nb_total=None, hosted_colls_actual_or_potential_results_p=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, **dummy):
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
if len(results_final) == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
kwargs['results_final_nb'] = results_final_nb
kwargs['results_final_nb_total'] = results_final_nb_total
kwargs['results_final_for_all_selected_colls'] = results_final_for_all_selected_colls
kwargs['cpu_time'] = cpu_time #rca TODO: check where the cpu_time is used, this line was missing
return (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls)
def prs_summarize_records(kwargs=None, req=None, p=None, f=None, aas=None,
p1=None, p2=None, p3=None, f1=None, f2=None, f3=None, op1=None, op2=None,
ln=None, results_final_for_all_selected_colls=None, of='hcs', **dummy):
# feed the current search to be summarized:
from invenio.search_engine_summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
triples_len = len(triples)
for i in range(triples_len):
fi, pi, oi = triples[i] # e.g.:
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
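# e.g. p1='ellis', f1='author', op1='a', p2='muon decay', f2='title'
# (with p and p3 empty) reconstructs search_p as:
#   author:ellis and title:"muon decay"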
summarize_records(results_final_for_all_selected_colls, of, ln, search_p, search_f, req)
def prs_print_records(kwargs=None, results_final=None, req=None, of=None, cc=None, pl_in_url=None,
ln=None, _=None, p=None, p1=None, p2=None, p3=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, d1y=None, d1m=None,
d1d=None, d2y=None, d2m=None, d2d=None, dt=None, jrec=None, colls_to_search=None,
hosted_colls_actual_or_potential_results_p=None, hosted_colls_results=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, results_final_nb=None,
cpu_time=None, verbose=None, em=None, **dummy):
if len(colls_to_search) > 1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if coll in results_final and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
results_final_recIDs = list(results_final[coll])
results_final_nb_found = len(results_final_recIDs)
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec, kwargs['f'])
if of.startswith("h"):
write_warning(results_final_comments, req=req)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
write_warning(results_final_relevances_prologue, req=req)
write_warning(results_final_relevances_epilogue, req=req)
else:
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm,
em=em,
nb_found=results_final_nb_found)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1, em=em))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] is None or result[1] is False:
## these are the searches that returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
def prs_log_query(kwargs=None, req=None, uid=None, of=None, ln=None, p=None, f=None,
colls_to_search=None, results_final_nb_total=None, em=None, **dummy):
# log query:
try:
id_query = log_query(req.remote_host, req.args, uid)
if of.startswith("h") and id_query and (em == '' or EM_REPOSITORY["alert"] in em):
if of not in ['hcs', 'hcs2']:
# display alert/RSS teaser for non-summary formats:
user_info = collect_user_info(req)
display_email_alert_part = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not user_info['precached_usealerts']:
display_email_alert_part = False
req.write(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query,
ln=ln, display_email_alert_part=display_email_alert_part))
except:
# do not log query if req is None (used by CLI interface)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
try:
loaded_websearch_services is not None
except Exception:
loaded_websearch_services = get_search_services()
def prs_search_common(kwargs=None, req=None, of=None, cc=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, colls_to_search=None, wash_colls_debug=None,
verbose=None, wl=None, em=None, **dummy):
query_representation_in_cache = repr((p, f, colls_to_search, wl))
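# e.g. p='ellis', f='author', colls_to_search=['Theses'], wl=0 gives the
# cache key "('ellis', 'author', ['Theses'], 0)"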
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
if of.startswith("h") and verbose and wash_colls_debug:
write_warning("wash_colls debugging info : %s" % wash_colls_debug, req=req)
prs_search_hosted_collections(kwargs=kwargs, **kwargs)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
# WebSearch services
if jrec <= 1 and \
(em == "" or EM_REPOSITORY["search_services"] in em):
user_info = collect_user_info(req)
# display only on first search page, and only if wanted
# when 'em' param set.
if p:
search_units = create_basic_search_units(req, p, f)
else:
search_units = []
search_service_answers = [search_service.answer(req, user_info, of, cc, colls_to_search, p, f, search_units, ln) \
for search_service in loaded_websearch_services]
search_service_answers.sort(reverse=True)
nb_answers = 0
best_relevance = None
for answer_relevance, answer_html in search_service_answers:
nb_answers += 1
if best_relevance is None:
best_relevance = answer_relevance
if best_relevance <= CFG_WEBSEARCH_SERVICE_MIN_RELEVANCE_TO_DISPLAY:
# The answer is not relevant enough
if verbose > 8:
write_warning("Service relevance too low (%i). Answer would be: %s" % (answer_relevance, answer_html), req=req)
break
if nb_answers > CFG_WEBSEARCH_SERVICE_MAX_NB_SERVICE_DISPLAY:
                    # We have reached the max number of services to display
if verbose > 8:
write_warning("Max number of services (%i) reached." % CFG_WEBSEARCH_SERVICE_MAX_NB_SERVICE_DISPLAY, req=req)
break
if best_relevance - answer_relevance > CFG_WEBSEARCH_SERVICE_MAX_RELEVANCE_DIFFERENCE:
                    # The service gave an answer that is much worse than the previous ones.
if verbose > 8:
write_warning("Service relevance too low (%i) compared to best one (%i). Answer would be: %s" % (answer_relevance, best_relevance, answer_html), req=req)
break
req.write('<div class="searchservicebox">')
req.write(answer_html)
if verbose > 8:
write_warning("Service relevance: %i" % answer_relevance, req=req)
req.write('</div>')
if answer_relevance == CFG_WEBSEARCH_SERVICE_MAX_SERVICE_ANSWER_RELEVANCE:
# The service assumes it has given the definitive answer
if verbose > 8:
write_warning("There cannot be a better answer. Leaving", req=req)
break
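            # In short: service answers are shown in decreasing relevance until one of the
            # conditions above stops the loop (relevance too low, too many services shown,
            # answer too far behind the best one, or a definitive answer was given).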
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 2 and not (p2 or p3):
## 3A add-to-search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
elif aas == 1 or (p1 or p2 or p3):
## 3B - advanced search
output = prs_advanced_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3C - simple search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
if len(results_in_any_collection) == 0 and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return None
# store this search query results into search results cache if needed:
prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, **kwargs)
# search stage 4 and 5: intersection with collection universe and sorting/limiting
try:
output = prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except: # no results to display
return None
t2 = os.times()[4]
cpu_time = t2 - t1
kwargs['cpu_time'] = cpu_time
## search stage 6: display results:
return prs_display_results(kwargs=kwargs, **kwargs)
def prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection,
kwargs=None, req=None, of=None,
**dummy):
# search stage 4: intersection with collection universe:
results_final = {}
output = prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs, **kwargs)
if output is not None:
return output
# another external search if we still don't have something
if results_final == {} and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
kwargs['results_final'] = results_final
raise Exception
# search stage 5: apply search option limits and restrictions:
output = prs_apply_search_limits(results_final, kwargs=kwargs, **kwargs)
kwargs['results_final'] = results_final
if output is not None:
return output
def prs_display_results(kwargs=None, results_final=None, req=None, of=None, sf=None,
so=None, sp=None, verbose=None, p=None, p1=None, p2=None, p3=None,
cc=None, ln=None, _=None, ec=None, colls_to_search=None, rm=None, cpu_time=None,
f=None, em=None, jrec=0, rg=None, **dummy
):
## search stage 6: display results:
# split result set into collections
(results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total == 0 and not kwargs['hosted_colls_potential_results_p']:
if of.startswith("h"):
write_warning("No match found, please enter different search terms.", req=req)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "intbitset":
#return the result as an intbitset
return results_final_for_all_selected_colls
elif of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, ln, kwargs['rg'], kwargs['jrec'], kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln)
if rg:
return recIDs[jrec:jrec+rg]
else:
return recIDs[jrec:]
elif of.startswith("h"):
if of not in ['hcs', 'hcs2', 'hcv', 'htcv', 'tlcv']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time,
ln, ec, hosted_colls_potential_results_p=kwargs['hosted_colls_potential_results_p'], em=em))
kwargs['selected_external_collections_infos'] = print_external_results_overview(req, cc, [p, p1, p2, p3],
f, ec, verbose, ln, print_overview=em == "" or EM_REPOSITORY["overview"] in em)
# print number of hits found for XML outputs:
if of.startswith("x") or of == 'mobb':
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % kwargs['results_final_nb_total'])
# print records:
if of in ['hcs', 'hcs2']:
prs_summarize_records(kwargs=kwargs, **kwargs)
elif of in ['hcv', 'htcv', 'tlcv'] and CFG_INSPIRE_SITE:
from invenio.search_engine_cvifier import cvify_records
cvify_records(results_final_for_all_selected_colls, of, req, so)
else:
prs_print_records(kwargs=kwargs, **kwargs)
prs_log_query(kwargs=kwargs, **kwargs)
# this is a copy of the prs_display_results with output parts removed, needed for external modules
def prs_rank_results(kwargs=None, results_final=None, req=None, colls_to_search=None,
sf=None, so=None, sp=None, of=None, rm=None, p=None, p1=None, p2=None, p3=None,
verbose=None, **dummy
):
## search stage 6: display results:
# split result set into collections
dummy_results_final_nb, dummy_results_final_nb_total, results_final_for_all_selected_colls = prs_split_into_collections(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, field=kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of)
return recIDs
def perform_request_cache(req, action="show"):
"""Manipulates the search engine cache."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
out = ""
out += "<h1>Search Cache</h1>"
# clear cache if requested:
if action == "clear":
search_results_cache.clear()
req.write(out)
# show collection reclist cache:
out = "<h3>Collection reclist cache</h3>"
out += "- collection table last updated: %s" % get_table_update_time('collection')
out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
out += "<br />- reclist cache contents:"
out += "<blockquote>"
for coll in collection_reclist_cache.cache.keys():
if collection_reclist_cache.cache[coll]:
out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
out += "</blockquote>"
req.write(out)
# show search results cache:
out = "<h3>Search Cache</h3>"
out += "- search cache usage: %d queries cached (max. ~%d)" % \
(len(search_results_cache.cache), CFG_WEBSEARCH_SEARCH_CACHE_SIZE)
if len(search_results_cache.cache):
out += "<br />- search cache contents:"
out += "<blockquote>"
for query, hitset in search_results_cache.cache.items():
out += "<br />%s ... %s" % (query, hitset)
out += """<p><a href="%s/search/cache?action=clear">clear search results cache</a>""" % CFG_SITE_URL
out += "</blockquote>"
req.write(out)
# show field i18nname cache:
out = "<h3>Field I18N names cache</h3>"
out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for field in field_i18nname_cache.cache.keys():
for ln in field_i18nname_cache.cache[field].keys():
out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
out += "</blockquote>"
req.write(out)
# show collection i18nname cache:
out = "<h3>Collection I18N names cache</h3>"
out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for coll in collection_i18nname_cache.cache.keys():
for ln in collection_i18nname_cache.cache[coll].keys():
out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
out += "</blockquote>"
req.write(out)
req.write("</html>")
return "\n"
def perform_request_log(req, date=""):
"""Display search log information for given date."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
req.write("<h1>Search Log</h1>")
if date: # case A: display stats for a day
yyyymmdd = string.atoi(date)
req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
# read file:
p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
lines = p.readlines()
p.close()
# process lines:
i = 0
for line in lines:
try:
datetime, dummy_aas, p, f, c, nbhits = line.split("#")
i += 1
req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
% (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
except:
                pass # ignore any malformed log lines
req.write("</table>")
else: # case B: display summary stats per day
yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
for day in range(yyyymm01, yyyymmdd + 1):
p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
for line in p.readlines():
req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" %
(day, CFG_SITE_URL, day, line))
p.close()
req.write("</table>")
req.write("</html>")
return "\n"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
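# Illustrative use of get_all_field_values() (hypothetical values):
#   get_all_field_values('980__a')  ->  ['ARTICLE', 'PREPRINT', 'THESIS', ...]
# The table name is derived from the first two digits of the tag, here bib98x.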
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True, split_by=0):
"""
Analyze RECIDS and look for TAGS and return most popular values
and the frequency with which they occur sorted according to
descending frequency.
If a value is found in EXCLUDE_VALUES, then do not count it.
If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
of value in the tags. If False, then we count the value only once
regardless of the number of times it may appear in a record.
(But, if the same value occurs in another record, we count it, of
course.)
@return: list of tuples containing tag and its frequency
Example:
>>> get_most_popular_field_values(range(11,20), '980__a')
[('PREPRINT', 10), ('THESIS', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
[('Ellis, J', 10), ('Ellis, N', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
[('Ellis, N', 7), ...]
"""
def _get_most_popular_field_values_helper_sorter(val1, val2):
"""Compare VAL1 and VAL2 according to, firstly, frequency, then
secondly, alphabetically."""
compared_via_frequencies = cmp(valuefreqdict[val2],
valuefreqdict[val1])
if compared_via_frequencies == 0:
return cmp(val1.lower(), val2.lower())
else:
return compared_via_frequencies
valuefreqdict = {}
## sanity check:
if not exclude_values:
exclude_values = []
if isinstance(tags, str):
tags = (tags,)
## find values to count:
vals_to_count = []
displaytmp = {}
if count_repetitive_values:
# counting technique A: can look up many records at once: (very fast)
for tag in tags:
vals_to_count.extend(get_fieldvalues(recids, tag, sort=False,
split_by=split_by))
else:
# counting technique B: must count record-by-record: (slow)
for recid in recids:
vals_in_rec = []
for tag in tags:
for val in get_fieldvalues(recid, tag, False):
vals_in_rec.append(val)
# do not count repetitive values within this record
# (even across various tags, so need to unify again):
dtmp = {}
for val in vals_in_rec:
dtmp[val.lower()] = 1
displaytmp[val.lower()] = val
vals_in_rec = dtmp.keys()
vals_to_count.extend(vals_in_rec)
## are we to exclude some of found values?
for val in vals_to_count:
if val not in exclude_values:
if val in valuefreqdict:
valuefreqdict[val] += 1
else:
valuefreqdict[val] = 1
## sort by descending frequency of values:
if not CFG_NUMPY_IMPORTABLE:
## original version
out = []
vals = valuefreqdict.keys()
vals.sort(_get_most_popular_field_values_helper_sorter)
for val in vals:
tmpdisplv = ''
if val in displaytmp:
tmpdisplv = displaytmp[val]
else:
tmpdisplv = val
out.append((tmpdisplv, valuefreqdict[val]))
return out
else:
f = [] # frequencies
n = [] # original names
ln = [] # lowercased names
## build lists within one iteration
for (val, freq) in valuefreqdict.iteritems():
f.append(-1 * freq)
if val in displaytmp:
n.append(displaytmp[val])
else:
n.append(val)
ln.append(val.lower())
## sort by frequency (desc) and then by lowercased name.
return [(n[i], -1 * f[i]) for i in numpy.lexsort([ln, f])]
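# Worked illustration (hypothetical data) of the numpy branch above:
#   valuefreqdict = {'Ellis, J': 10, 'Adams, B': 7, 'Ellis, N': 7}
#   f  = [-10, -7, -7]                        # frequencies negated for descending order
#   ln = ['ellis, j', 'adams, b', 'ellis, n']
# numpy.lexsort([ln, f]) sorts primarily by f (ascending, i.e. frequency descending)
# and breaks ties on the lowercased name, giving
#   [('Ellis, J', 10), ('Adams, B', 7), ('Ellis, N', 7)]
# which matches the comparator used in the non-numpy branch.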
def profile(p="", f="", c=CFG_SITE_NAME):
"""Profile search time."""
import profile as pyprofile
import pstats
pyprofile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
p = pstats.Stats("perform_request_search_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
return 0
def perform_external_collection_search_with_em(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, em=""):
perform_external_collection_search(req, current_collection, pattern_list, field, external_collection,
verbosity_level, lang, selected_external_collections_infos,
print_overview=em == "" or EM_REPOSITORY["overview"] in em,
print_search_info=em == "" or EM_REPOSITORY["search_info"] in em,
print_see_also_box=em == "" or EM_REPOSITORY["see_also_box"] in em,
print_body=em == "" or EM_REPOSITORY["body"] in em)
def check_user_can_edit_record(req, recid):
""" Check if user has authorization to modify a collection
the recid belongs to
"""
record_collections = get_all_collections_of_a_record(recid)
if not record_collections:
# Check if user has access to all collections
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection='')
if auth_code == 0:
return True
else:
for collection in record_collections:
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection=collection)
if auth_code == 0:
return True
return False
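# Hedged sketch tied to the accompanying review comment (not code from this file):
# a caller that only needs the content of a single metadata field could skip the
# authorization check above, e.g.
#
#     if len(fields_requested) == 1:
#         # plain read of one field: no edit-permission check needed
#         values = get_fieldvalues(recid, fields_requested[0])
#     elif check_user_can_edit_record(req, recid):
#         values = get_record_contents(recid)   # hypothetical helper
#
# 'fields_requested' and 'get_record_contents' are made-up names used only for illustration.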
| 1 | 16,890 | if we are seeking for the content of only one field from metadata, no need to check whether a user could edit record or not | inveniosoftware-invenio | py |
@@ -25,3 +25,4 @@ class ChromeRemoteConnection(RemoteConnection):
self._commands["launchApp"] = ('POST', '/session/$sessionId/chromium/launch_app')
self._commands["setNetworkConditions"] = ('POST', '/session/$sessionId/chromium/network_conditions')
self._commands["getNetworkConditions"] = ('GET', '/session/$sessionId/chromium/network_conditions')
+ self._commands['ExecuteCDP'] = ('POST', '/session/$sessionId/goog/cdp/execute') | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.remote.remote_connection import RemoteConnection
class ChromeRemoteConnection(RemoteConnection):
def __init__(self, remote_server_addr, keep_alive=True):
RemoteConnection.__init__(self, remote_server_addr, keep_alive)
self._commands["launchApp"] = ('POST', '/session/$sessionId/chromium/launch_app')
self._commands["setNetworkConditions"] = ('POST', '/session/$sessionId/chromium/network_conditions')
self._commands["getNetworkConditions"] = ('GET', '/session/$sessionId/chromium/network_conditions')
| 1 | 15,524 | Should this be camelCase to match the above commands? I am not the expert here so maybe Lucas or David can chime in. | SeleniumHQ-selenium | py |
@@ -254,7 +254,8 @@ namespace Datadog.Trace.ClrProfiler.Integrations
scope = tracer.StartActive(ValidateOperationName, serviceName: serviceName);
var span = scope.Span;
span.Type = SpanTypes.GraphQL;
-
+ span.SetTag(Tags.SpanKind, SpanKinds.Server);
+ span.SetTag(Tags.Language, TracerConstants.Language);
span.SetTag(Tags.GraphQLSource, source);
// set analytics sample rate if enabled | 1 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.ClrProfiler.Helpers;
using Datadog.Trace.Logging;
namespace Datadog.Trace.ClrProfiler.Integrations
{
/// <summary>
/// Tracing integration for GraphQL.Server.Transports.AspNetCore
/// </summary>
public static class GraphQLIntegration
{
private const string IntegrationName = "GraphQL";
private const string ServiceName = "graphql";
private const string Major2 = "2";
private const string Major2Minor3 = "2.3";
private const string ParseOperationName = "graphql.parse"; // Instrumentation not yet implemented
private const string ValidateOperationName = "graphql.validate";
private const string ExecuteOperationName = "graphql.execute";
private const string ResolveOperationName = "graphql.resolve"; // Instrumentation not yet implemented
private const string GraphQLAssemblyName = "GraphQL";
private const string GraphQLDocumentValidatorInterfaceName = "GraphQL.Validation.IDocumentValidator";
private const string GraphQLExecutionResultName = "GraphQL.ExecutionResult";
private const string GraphQLExecutionStrategyInterfaceName = "GraphQL.Execution.IExecutionStrategy";
private const string GraphQLValidationResultInterfaceName = "GraphQL.Validation.IValidationResult";
private const string TaskOfGraphQLExecutionResult = "System.Threading.Tasks.Task`1<" + GraphQLExecutionResultName + ">";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(GraphQLIntegration));
/// <summary>
/// Wrap the original method by adding instrumentation code around it.
/// </summary>
/// <param name="documentValidator">The instance of GraphQL.Validation.IDocumentValidator.</param>
/// <param name="originalQuery">The source of the original GraphQL query.</param>
/// <param name="schema">The GraphQL schema.</param>
/// <param name="document">The GraphQL document.</param>
/// <param name="rules">The list of validation rules.</param>
/// <param name="userContext">The user context.</param>
/// <param name="inputs">The input variables.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssembly = GraphQLAssemblyName,
TargetType = GraphQLDocumentValidatorInterfaceName,
TargetSignatureTypes = new[] { GraphQLValidationResultInterfaceName, ClrNames.String, "GraphQL.Types.ISchema", "GraphQL.Language.AST.Document", "System.Collections.Generic.IEnumerable`1<GraphQL.Validation.IValidationRule>", ClrNames.Ignore, "GraphQL.Inputs" },
TargetMinimumVersion = Major2Minor3,
TargetMaximumVersion = Major2)]
public static object Validate(
object documentValidator,
object originalQuery,
object schema,
object document,
object rules,
object userContext,
object inputs,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (documentValidator == null) { throw new ArgumentNullException(nameof(documentValidator)); }
const string methodName = nameof(Validate);
// At runtime, get a Type object for GraphQL.ExecutionResult
var documentValidatorInstanceType = documentValidator.GetType();
try
{
var graphQLAssembly = AppDomain.CurrentDomain
.GetAssemblies()
.Single(a => a.GetName().Name.Equals(GraphQLAssemblyName));
}
catch (Exception ex)
{
// This shouldn't happen because the GraphQL assembly should have been loaded to construct various other types
// profiled app will not continue working as expected without this method
Log.Error(ex, $"Error finding types in the GraphQL assembly.");
throw;
}
Func<object, object, object, object, object, object, object, object> instrumentedMethod;
try
{
instrumentedMethod =
MethodBuilder<Func<object, object, object, object, object, object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, methodName)
.WithConcreteType(documentValidatorInstanceType)
.WithParameters(originalQuery, schema, document, rules, userContext, inputs)
.WithNamespaceAndNameFilters(
GraphQLValidationResultInterfaceName,
ClrNames.String,
"GraphQL.Types.ISchema",
"GraphQL.Language.AST.Document",
"System.Collections.Generic.IEnumerable`1",
ClrNames.Ignore,
"GraphQL.Inputs")
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: GraphQLDocumentValidatorInterfaceName,
methodName: methodName,
instanceType: documentValidator.GetType().AssemblyQualifiedName);
throw;
}
using (var scope = CreateScopeFromValidate(document))
{
try
{
var validationResult = instrumentedMethod(documentValidator, originalQuery, schema, document, rules, userContext, inputs);
RecordExecutionErrorsIfPresent(scope.Span, "GraphQL.Validation.ValidationError", validationResult.GetProperty("Errors").GetValueOrDefault());
return validationResult;
}
catch (Exception ex)
{
scope?.Span.SetException(ex);
throw;
}
}
}
/// <summary>
/// Wrap the original method by adding instrumentation code around it.
/// </summary>
/// <param name="executionStrategy">The instance of GraphQL.Execution.IExecutionStrategy.</param>
/// <param name="context">The execution context of the GraphQL operation.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssembly = GraphQLAssemblyName,
TargetType = GraphQLExecutionStrategyInterfaceName,
TargetSignatureTypes = new[] { TaskOfGraphQLExecutionResult, "GraphQL.Execution.ExecutionContext" },
TargetMinimumVersion = Major2Minor3,
TargetMaximumVersion = Major2)]
public static object ExecuteAsync(object executionStrategy, object context, int opCode, int mdToken, long moduleVersionPtr)
{
if (executionStrategy == null) { throw new ArgumentNullException(nameof(executionStrategy)); }
const string methodName = nameof(ExecuteAsync);
// At runtime, get a Type object for GraphQL.ExecutionResult
var executionStrategyInstanceType = executionStrategy.GetType();
Type graphQLExecutionResultType;
Type executionStrategyInterfaceType;
try
{
var graphQLAssembly = AppDomain.CurrentDomain.GetAssemblies()
.Single(a => a.GetName().Name.Equals(GraphQLAssemblyName));
graphQLExecutionResultType = graphQLAssembly.GetType(GraphQLExecutionResultName, throwOnError: true);
executionStrategyInterfaceType = graphQLAssembly.GetType(GraphQLExecutionStrategyInterfaceName, throwOnError: true);
}
catch (Exception ex)
{
// This shouldn't happen because the GraphQL assembly should have been loaded to construct various other types
// profiled app will not continue working as expected without this method
Log.Error(ex, "Error finding types in the GraphQL assembly.");
throw;
}
Func<object, object, object> instrumentedMethod;
try
{
instrumentedMethod =
MethodBuilder<Func<object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, methodName)
.WithConcreteType(executionStrategyInstanceType)
.WithParameters(context)
.WithNamespaceAndNameFilters(ClrNames.GenericTask, "GraphQL.Execution.ExecutionContext")
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: GraphQLExecutionStrategyInterfaceName,
methodName: methodName,
instanceType: executionStrategy.GetType().AssemblyQualifiedName);
throw;
}
return AsyncHelper.InvokeGenericTaskDelegate(
owningType: executionStrategyInterfaceType,
taskResultType: graphQLExecutionResultType,
nameOfIntegrationMethod: nameof(CallGraphQLExecuteAsyncInternal),
integrationType: typeof(GraphQLIntegration),
executionStrategy,
context,
instrumentedMethod);
}
private static async Task<T> CallGraphQLExecuteAsyncInternal<T>(
object executionStrategy,
object executionContext,
Func<object, object, object> originalMethod)
{
using (var scope = CreateScopeFromExecuteAsync(executionContext))
{
try
{
var task = (Task<T>)originalMethod(executionStrategy, executionContext);
var executionResult = await task.ConfigureAwait(false);
RecordExecutionErrorsIfPresent(scope.Span, "GraphQL.ExecutionError", executionContext.GetProperty("Errors").GetValueOrDefault());
return executionResult;
}
catch (Exception ex)
{
scope?.Span.SetException(ex);
throw;
}
}
}
private static Scope CreateScopeFromValidate(object document)
{
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
Tracer tracer = Tracer.Instance;
string source = document.GetProperty<string>("OriginalQuery")
.GetValueOrDefault();
string serviceName = string.Join("-", tracer.DefaultServiceName, ServiceName);
Scope scope = null;
try
{
scope = tracer.StartActive(ValidateOperationName, serviceName: serviceName);
var span = scope.Span;
span.Type = SpanTypes.GraphQL;
span.SetTag(Tags.GraphQLSource, source);
// set analytics sample rate if enabled
var analyticsSampleRate = tracer.Settings.GetIntegrationAnalyticsSampleRate(IntegrationName, enabledWithGlobalSetting: false);
span.SetMetric(Tags.Analytics, analyticsSampleRate);
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
return scope;
}
private static Scope CreateScopeFromExecuteAsync(object executionContext)
{
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
Tracer tracer = Tracer.Instance;
string source = executionContext.GetProperty("Document")
.GetProperty<string>("OriginalQuery")
.GetValueOrDefault();
string operationName = executionContext.GetProperty("Operation")
.GetProperty<string>("Name")
.GetValueOrDefault();
string operationType = executionContext.GetProperty("Operation")
.GetProperty<Enum>("OperationType")
.GetValueOrDefault()
.ToString();
string serviceName = string.Join("-", tracer.DefaultServiceName, ServiceName);
Scope scope = null;
try
{
scope = tracer.StartActive(ExecuteOperationName, serviceName: serviceName);
var span = scope.Span;
span.Type = SpanTypes.GraphQL;
span.ResourceName = $"{operationType} {operationName ?? "operation"}";
span.SetTag(Tags.GraphQLSource, source);
span.SetTag(Tags.GraphQLOperationName, operationName);
span.SetTag(Tags.GraphQLOperationType, operationType);
// set analytics sample rate if enabled
var analyticsSampleRate = tracer.Settings.GetIntegrationAnalyticsSampleRate(IntegrationName, enabledWithGlobalSetting: false);
span.SetMetric(Tags.Analytics, analyticsSampleRate);
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
return scope;
}
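        // Hedged sketch (reviewer suggestion, not part of this file): the tags added to
        // CreateScopeFromValidate in the accompanying patch would presumably also belong in
        // CreateScopeFromExecuteAsync above, next to span.Type = SpanTypes.GraphQL:
        //     span.SetTag(Tags.SpanKind, SpanKinds.Server);
        //     span.SetTag(Tags.Language, TracerConstants.Language);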
private static void RecordExecutionErrorsIfPresent(Span span, string errorType, object executionErrors)
{
var errorCount = executionErrors.GetProperty<int>("Count").GetValueOrDefault();
if (errorCount > 0)
{
span.Error = true;
span.SetTag(Trace.Tags.ErrorMsg, $"{errorCount} error(s)");
span.SetTag(Trace.Tags.ErrorType, errorType);
span.SetTag(Trace.Tags.ErrorStack, ConstructErrorMessage(executionErrors));
}
}
private static string ConstructErrorMessage(object executionErrors)
{
if (executionErrors == null)
{
return string.Empty;
}
var builder = new StringBuilder();
var tab = " ";
builder.AppendLine("errors: [");
var enumerator = executionErrors.CallMethod<IEnumerator<object>>("GetEnumerator").GetValueOrDefault();
if (enumerator != null)
{
try
{
while (enumerator.MoveNext())
{
var executionError = enumerator.GetProperty("Current").GetValueOrDefault();
builder.AppendLine($"{tab}{{");
var message = executionError.GetProperty<string>("Message").GetValueOrDefault();
if (message != null)
{
builder.AppendLine($"{tab + tab}\"message\": \"{message.Replace("\r", "\\r").Replace("\n", "\\n")}\",");
}
var path = executionError.GetProperty<IEnumerable<string>>("Path").GetValueOrDefault();
if (path != null)
{
builder.AppendLine($"{tab + tab}\"path\": \"{string.Join(".", path)}\",");
}
var code = executionError.GetProperty<string>("Code").GetValueOrDefault();
if (code != null)
{
builder.AppendLine($"{tab + tab}\"code\": \"{code}\",");
}
builder.AppendLine($"{tab + tab}\"locations\": [");
var locations = executionError.GetProperty<IEnumerable<object>>("Locations").GetValueOrDefault();
if (locations != null)
{
foreach (var location in locations)
{
var line = location.GetProperty<int>("Line").GetValueOrDefault();
var column = location.GetProperty<int>("Column").GetValueOrDefault();
builder.AppendLine($"{tab + tab + tab}{{");
builder.AppendLine($"{tab + tab + tab + tab}\"line\": {line},");
builder.AppendLine($"{tab + tab + tab + tab}\"column\": {column}");
builder.AppendLine($"{tab + tab + tab}}},");
}
}
builder.AppendLine($"{tab + tab}]");
builder.AppendLine($"{tab}}},");
}
enumerator.Dispose();
}
catch (Exception ex)
{
Log.Error(ex, "Error creating GraphQL error message.");
return "errors: []";
}
}
builder.AppendLine("]");
return builder.ToString();
}
}
}
| 1 | 16,084 | This is also missing in CreateScopeFromExecuteAsync. Can you add that there too? | DataDog-dd-trace-dotnet | .cs |
@@ -30,9 +30,12 @@ public class DateUtils {
}
String date = input.trim().replace('/', '-').replaceAll("( ){2,}+", " ");
+ // remove colon from timezone to avoid differences between Android and Java SimpleDateFormat
+ date = date.replaceAll("([+-]\\d\\d):(\\d\\d)$", "$1$2");
+
// CEST is widely used but not in the "ISO 8601 Time zone" list. Let's hack around.
- date = date.replaceAll("CEST$", "+02:00");
- date = date.replaceAll("CET$", "+01:00");
+ date = date.replaceAll("CEST$", "+0200");
+ date = date.replaceAll("CET$", "+0100");
// some generators use "Sept" for September
date = date.replaceAll("\\bSept\\b", "Sep"); | 1 | package de.danoeh.antennapod.core.util;
import android.content.Context;
import android.util.Log;
import org.apache.commons.lang3.StringUtils;
import java.text.DateFormat;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.Locale;
import java.util.TimeZone;
/**
* Parses several date formats.
*/
public class DateUtils {
private DateUtils(){}
private static final String TAG = "DateUtils";
private static final TimeZone defaultTimezone = TimeZone.getTimeZone("GMT");
public static Date parse(final String input) {
if (input == null) {
throw new IllegalArgumentException("Date must not be null");
}
String date = input.trim().replace('/', '-').replaceAll("( ){2,}+", " ");
// CEST is widely used but not in the "ISO 8601 Time zone" list. Let's hack around.
date = date.replaceAll("CEST$", "+02:00");
date = date.replaceAll("CET$", "+01:00");
// some generators use "Sept" for September
date = date.replaceAll("\\bSept\\b", "Sep");
// if datetime is more precise than seconds, make sure the value is in ms
if (date.contains(".")) {
int start = date.indexOf('.');
int current = start + 1;
while (current < date.length() && Character.isDigit(date.charAt(current))) {
current++;
}
            // more precise than milliseconds: discard the extra decimal places
if (current - start > 4) {
if (current < date.length() - 1) {
date = date.substring(0, start + 4) + date.substring(current);
} else {
date = date.substring(0, start + 4);
}
// less than 4 decimal places: pad to have a consistent format for the parser
} else if (current - start < 4) {
if (current < date.length() - 1) {
date = date.substring(0, current) + StringUtils.repeat("0", 4 - (current - start)) + date.substring(current);
} else {
date = date.substring(0, current) + StringUtils.repeat("0", 4 - (current - start));
}
}
}
final String[] patterns = {
"dd MMM yy HH:mm:ss Z",
"dd MMM yy HH:mm Z",
"EEE, dd MMM yyyy HH:mm:ss Z",
"EEE, dd MMM yyyy HH:mm:ss",
"EEE, dd MMMM yyyy HH:mm:ss Z",
"EEE, dd MMMM yyyy HH:mm:ss",
"EEEE, dd MMM yyyy HH:mm:ss Z",
"EEEE, dd MMM yy HH:mm:ss Z",
"EEEE, dd MMM yyyy HH:mm:ss",
"EEEE, dd MMM yy HH:mm:ss",
"EEE MMM d HH:mm:ss yyyy",
"EEE, dd MMM yyyy HH:mm Z",
"EEE, dd MMM yyyy HH:mm",
"EEE, dd MMMM yyyy HH:mm Z",
"EEE, dd MMMM yyyy HH:mm",
"EEEE, dd MMM yyyy HH:mm Z",
"EEEE, dd MMM yy HH:mm Z",
"EEEE, dd MMM yyyy HH:mm",
"EEEE, dd MMM yy HH:mm",
"EEE MMM d HH:mm yyyy",
"yyyy-MM-dd'T'HH:mm:ss",
"yyyy-MM-dd'T'HH:mm:ss.SSS Z",
"yyyy-MM-dd'T'HH:mm:ss.SSS",
"yyyy-MM-dd'T'HH:mm:ssZ",
"yyyy-MM-dd'T'HH:mm:ss'Z'",
"yyyy-MM-dd'T'HH:mm:ss.SSSZ",
"yyyy-MM-ddZ",
"yyyy-MM-dd",
"EEE d MMM yyyy HH:mm:ss 'GMT'Z (z)"
};
SimpleDateFormat parser = new SimpleDateFormat("", Locale.US);
parser.setLenient(false);
parser.setTimeZone(defaultTimezone);
ParsePosition pos = new ParsePosition(0);
for (String pattern : patterns) {
parser.applyPattern(pattern);
pos.setIndex(0);
try {
Date result = parser.parse(date, pos);
if (result != null && pos.getIndex() == date.length()) {
return result;
}
} catch (Exception e) {
Log.e(TAG, Log.getStackTraceString(e));
}
}
// if date string starts with a weekday, try parsing date string without it
if (date.matches("^\\w+, .*$")) {
return parse(date.substring(date.indexOf(',') + 1));
}
Log.d(TAG, "Could not parse date string \"" + input + "\" [" + date + "]");
return null;
}
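    // Worked illustration (hypothetical inputs) of the fractional-second normalisation
    // in parse() above:
    //   "Wed, 01 May 2014 12:00:00.7 +0200"      -> ".7"      is padded to  ".700"
    //   "Wed, 01 May 2014 12:00:00.123456 +0200" -> ".123456" is trimmed to ".123"
    // so the SSS-based patterns always see exactly three decimal digits.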
/**
* Takes a string of the form [HH:]MM:SS[.mmm] and converts it to
* milliseconds.
*
* @throws java.lang.NumberFormatException if the number segments contain invalid numbers.
*/
public static long parseTimeString(final String time) {
String[] parts = time.split(":");
long result = 0;
int idx = 0;
if (parts.length == 3) {
// string has hours
result += Integer.parseInt(parts[idx]) * 3600000L;
idx++;
}
if (parts.length >= 2) {
result += Integer.parseInt(parts[idx]) * 60000L;
idx++;
result += (long) (Float.parseFloat(parts[idx]) * 1000L);
}
return result;
}
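    // Example: parseTimeString("01:02:03.5")
    //   = 1 * 3600000 + 2 * 60000 + (long) (3.5f * 1000)
    //   = 3723500 milliseconds.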
public static String formatRFC822Date(Date date) {
SimpleDateFormat format = new SimpleDateFormat("dd MMM yy HH:mm:ss Z", Locale.US);
return format.format(date);
}
public static String formatRFC3339Local(Date date) {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.US);
return format.format(date);
}
public static String formatRFC3339UTC(Date date) {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.US);
format.setTimeZone(defaultTimezone);
return format.format(date);
}
public static String formatAbbrev(final Context context, final Date date) {
if (date == null) {
return "";
}
GregorianCalendar now = new GregorianCalendar();
GregorianCalendar cal = new GregorianCalendar();
cal.setTime(date);
boolean withinLastYear = now.get(Calendar.YEAR) == cal.get(Calendar.YEAR);
int format = android.text.format.DateUtils.FORMAT_ABBREV_ALL;
if (withinLastYear) {
format |= android.text.format.DateUtils.FORMAT_NO_YEAR;
}
return android.text.format.DateUtils.formatDateTime(context, date.getTime(), format);
}
public static String formatForAccessibility(final Context context, final Date date) {
if (date == null) {
return "";
}
return DateFormat.getDateInstance(DateFormat.LONG).format(date);
}
}
| 1 | 18,089 | I hope that this does not break anything... Probably needs detailed beta tests. | AntennaPod-AntennaPod | java |
@@ -1518,6 +1518,17 @@ module RSpec::Core
end
end
+ describe 'recording spec start time (for measuring load)' do
+ it 'returns a time' do
+ expect(config.start_time).to be_an_instance_of ::Time
+ end
+
+ it 'is configurable' do
+ config.start_time = 42
+ expect(config.start_time).to eq 42
+ end
+ end
+
# assigns files_or_directories_to_run and triggers post-processing
# via `files_to_run`.
def assign_files_or_directories_to_run(*value) | 1 | require 'spec_helper'
require 'tmpdir'
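# Hedged sketch (not part of this spec file): the `start_time` examples in the
# accompanying patch imply a Configuration change roughly along the lines of
#
#     add_setting :start_time
#     # ...with a default captured when the configuration object is built, e.g.
#     @start_time = ::Time.now
#
# `add_setting` is the macro this class already uses for simple settings; the exact
# implementation is not shown in the diff, so the above is only an assumption.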
module RSpec::Core
RSpec.describe Configuration do
let(:config) { Configuration.new }
let(:exclusion_filter) { config.exclusion_filter.rules }
let(:inclusion_filter) { config.inclusion_filter.rules }
describe '#deprecation_stream' do
it 'defaults to standard error' do
expect($rspec_core_without_stderr_monkey_patch.deprecation_stream).to eq STDERR
end
it 'is configurable' do
io = double 'deprecation io'
config.deprecation_stream = io
expect(config.deprecation_stream).to eq io
end
context 'when the reporter has already been initialized' do
before do
config.reporter
allow(config).to receive(:warn)
end
it 'prints a notice indicating the reconfigured output_stream will be ignored' do
config.deprecation_stream = double("IO")
expect(config).to have_received(:warn).with(/deprecation_stream.*#{__FILE__}:#{__LINE__ - 1}/)
end
it 'does not change the value of `output_stream`' do
value = config.deprecation_stream
config.deprecation_stream = double("IO")
expect(config.deprecation_stream).to equal(value)
end
it 'does not print a warning if set to the value it already has' do
config.deprecation_stream = config.deprecation_stream
expect(config).not_to have_received(:warn)
end
end
end
describe "#output_stream" do
it 'defaults to standard output' do
expect(config.output_stream).to eq $stdout
end
it 'is configurable' do
io = double 'output io'
config.output_stream = io
expect(config.output_stream).to eq io
end
context 'when the reporter has already been initialized' do
before do
config.reporter
allow(config).to receive(:warn)
end
it 'prints a notice indicating the reconfigured output_stream will be ignored' do
config.output_stream = StringIO.new
expect(config).to have_received(:warn).with(/output_stream.*#{__FILE__}:#{__LINE__ - 1}/)
end
it 'does not change the value of `output_stream`' do
config.output_stream = StringIO.new
expect(config.output_stream).to eq($stdout)
end
it 'does not print a warning if set to the value it already has' do
config.output_stream = config.output_stream
expect(config).not_to have_received(:warn)
end
end
end
describe "#requires=" do
include_context "isolate load path mutation"
def absolute_path_to(dir)
File.expand_path("../../../../#{dir}", __FILE__)
end
it 'adds `lib` to the load path' do
lib_dir = absolute_path_to("lib")
$LOAD_PATH.delete(lib_dir)
expect($LOAD_PATH).not_to include(lib_dir)
config.requires = []
expect($LOAD_PATH).to include(lib_dir)
end
it 'adds the configured `default_path` to the load path' do
config.default_path = 'features'
foo_dir = absolute_path_to("features")
expect($LOAD_PATH).not_to include(foo_dir)
config.requires = []
expect($LOAD_PATH).to include(foo_dir)
end
it 'stores the required files' do
expect(config).to receive(:require).with('a/path')
config.requires = ['a/path']
expect(config.requires).to eq ['a/path']
end
context "when `default_path` refers to a file rather than a directory" do
it 'does not add it to the load path' do
config.default_path = 'Rakefile'
config.requires = []
expect($LOAD_PATH).not_to include(match(/Rakefile/))
end
end
end
describe "#load_spec_files" do
it "loads files using load" do
config.files_to_run = ["foo.bar", "blah_spec.rb"]
expect(config).to receive(:load).twice
config.load_spec_files
end
it "loads each file once, even if duplicated in list" do
config.files_to_run = ["a_spec.rb", "a_spec.rb"]
expect(config).to receive(:load).once
config.load_spec_files
end
end
describe "#mock_framework" do
it "defaults to :rspec" do
expect(RSpec::Support).to receive(:require_rspec_core).with('mocking_adapters/rspec')
config.mock_framework
end
end
describe "#mock_framework="do
it "delegates to mock_with" do
expect(config).to receive(:mock_with).with(:rspec)
config.mock_framework = :rspec
end
end
shared_examples "a configurable framework adapter" do |m|
it "yields a config object if the framework_module supports it" do
custom_config = Struct.new(:custom_setting).new
mod = Module.new
allow(mod).to receive_messages(:configuration => custom_config)
config.send m, mod do |mod_config|
mod_config.custom_setting = true
end
expect(custom_config.custom_setting).to be_truthy
end
it "raises if framework module doesn't support configuration" do
mod = Module.new
expect {
config.send m, mod do |mod_config|
end
}.to raise_error(/must respond to `configuration`/)
end
end
describe "#mock_with" do
before { allow(config).to receive(:require) }
it_behaves_like "a configurable framework adapter", :mock_with
it "allows rspec-mocks to be configured with a provided block" do
mod = Module.new
expect(RSpec::Mocks.configuration).to receive(:add_stub_and_should_receive_to).with(mod)
config.mock_with :rspec do |c|
c.add_stub_and_should_receive_to mod
end
end
context "with a module" do
it "sets the mock_framework_adapter to that module" do
mod = Module.new
config.mock_with mod
expect(config.mock_framework).to eq(mod)
end
end
it 'uses the named adapter' do
expect(RSpec::Support).to receive(:require_rspec_core).with('mocking_adapters/mocha')
stub_const("RSpec::Core::MockingAdapters::Mocha", Module.new)
config.mock_with :mocha
end
it "uses the null adapter when given :nothing" do
expect(RSpec::Support).to receive(:require_rspec_core).with('mocking_adapters/null').and_call_original
config.mock_with :nothing
end
it "raises an error when given an unknown key" do
expect {
config.mock_with :crazy_new_mocking_framework_ive_not_yet_heard_of
}.to raise_error(ArgumentError, /unknown mocking framework/i)
end
it "raises an error when given another type of object" do
expect {
config.mock_with Object.new
}.to raise_error(ArgumentError, /unknown mocking framework/i)
end
context 'when there are already some example groups defined' do
before { allow(RSpec::Support).to receive(:require_rspec_core) }
it 'raises an error since this setting must be applied before any groups are defined' do
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
mocha = stub_const("RSpec::Core::MockingAdapters::Mocha", Module.new)
allow(mocha).to receive_messages(:framework_name => :mocha)
expect {
config.mock_with :mocha
}.to raise_error(/must be configured before any example groups are defined/)
end
it 'does not raise an error if the default `mock_with :rspec` is re-configured' do
config.mock_framework # called by RSpec when configuring the first example group
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
config.mock_with :rspec
end
it 'does not raise an error if re-setting the same config' do
mocha = stub_const("RSpec::Core::MockingAdapters::Mocha", Module.new)
allow(mocha).to receive_messages(:framework_name => :mocha)
groups = []
allow(RSpec.world).to receive_messages(:example_groups => groups)
config.mock_with :mocha
groups << double.as_null_object
config.mock_with :mocha
end
end
end
describe "#expectation_framework" do
it "defaults to :rspec" do
expect(config).to receive(:require).with('rspec/expectations')
config.expectation_frameworks
end
end
describe "#expectation_framework=" do
it "delegates to expect_with=" do
expect(config).to receive(:expect_with).with(:rspec)
config.expectation_framework = :rspec
end
end
def stub_stdlib_adapter
stub_const("Test::Unit::Assertions", Module.new)
allow(config).to receive(:require).with("test/unit/assertions")
allow(config).to receive(:require).with("rspec/expectations")
allow(config).to receive(:require).
with("rspec/core/stdlib_assertions_adapter").and_call_original
end
describe "#expect_with" do
before do
stub_stdlib_adapter
end
it_behaves_like "a configurable framework adapter", :expect_with
[
[:rspec, 'rspec/expectations'],
[:stdlib, 'test/unit/assertions']
].each do |framework, required_file|
context "with #{framework}" do
it "requires #{required_file}" do
expect(config).to receive(:require).with(required_file)
config.expect_with framework
end
end
end
it "supports multiple calls" do
config.expect_with :rspec
config.expect_with :stdlib
expect(config.expectation_frameworks).to eq [RSpec::Matchers, RSpec::Core::StdlibAssertionsAdapter]
end
it "raises if block given with multiple args" do
expect {
config.expect_with :rspec, :stdlib do |mod_config|
end
}.to raise_error(/expect_with only accepts/)
end
it "raises ArgumentError if framework is not supported" do
expect do
config.expect_with :not_supported
end.to raise_error(ArgumentError)
end
context 'when there are already some example groups defined' do
it 'raises an error since this setting must be applied before any groups are defined' do
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
expect {
config.expect_with :rspec
}.to raise_error(/must be configured before any example groups are defined/)
end
it 'does not raise an error if the default `expect_with :rspec` is re-configured' do
config.expectation_frameworks # called by RSpec when configuring the first example group
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
config.expect_with :rspec
end
it 'does not raise an error if re-setting the same config' do
groups = []
allow(RSpec.world).to receive_messages(:example_groups => groups)
config.expect_with :stdlib
groups << double.as_null_object
config.expect_with :stdlib
end
end
end
describe "#expecting_with_rspec?" do
before do
stub_stdlib_adapter
end
it "returns false by default" do
expect(config).not_to be_expecting_with_rspec
end
it "returns true when `expect_with :rspec` has been configured" do
config.expect_with :rspec
expect(config).to be_expecting_with_rspec
end
it "returns true when `expect_with :rspec, :stdlib` has been configured" do
config.expect_with :rspec, :stdlib
expect(config).to be_expecting_with_rspec
end
it "returns true when `expect_with :stdlib, :rspec` has been configured" do
config.expect_with :stdlib, :rspec
expect(config).to be_expecting_with_rspec
end
it "returns false when `expect_with :stdlib` has been configured" do
config.expect_with :stdlib
expect(config).not_to be_expecting_with_rspec
end
end
describe "#files_to_run" do
it "loads files not following pattern if named explicitly" do
assign_files_or_directories_to_run "spec/rspec/core/resources/a_bar.rb"
expect(config.files_to_run).to eq([ "spec/rspec/core/resources/a_bar.rb"])
end
it "prevents repetition of dir when start of the pattern" do
config.pattern = "spec/**/a_spec.rb"
assign_files_or_directories_to_run "spec"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
it "does not prevent repetition of dir when later of the pattern" do
config.pattern = "rspec/**/a_spec.rb"
assign_files_or_directories_to_run "spec"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
it 'reloads when `files_or_directories_to_run` is reassigned' do
config.pattern = "spec/**/a_spec.rb"
config.files_or_directories_to_run = "empty_dir"
expect {
config.files_or_directories_to_run = "spec"
}.to change { config.files_to_run }.
to(["spec/rspec/core/resources/a_spec.rb"])
end
context "with <path>:<line_number>" do
it "overrides inclusion filters set on config" do
config.filter_run_including :foo => :bar
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(inclusion_filter.size).to eq(1)
expect(inclusion_filter[:locations].keys.first).to match(/path\/to\/file\.rb$/)
expect(inclusion_filter[:locations].values.first).to eq([37])
end
it "overrides inclusion filters set before config" do
config.force(:inclusion_filter => {:foo => :bar})
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(inclusion_filter.size).to eq(1)
expect(inclusion_filter[:locations].keys.first).to match(/path\/to\/file\.rb$/)
expect(inclusion_filter[:locations].values.first).to eq([37])
end
it "clears exclusion filters set on config" do
config.exclusion_filter = { :foo => :bar }
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(exclusion_filter).to be_empty,
"expected exclusion filter to be empty:\n#{exclusion_filter}"
end
it "clears exclusion filters set before config" do
config.force(:exclusion_filter => { :foo => :bar })
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(config.exclusion_filter).to be_empty,
"expected exclusion filter to be empty:\n#{config.exclusion_filter}"
end
end
context "with default pattern" do
it "loads files named _spec.rb" do
assign_files_or_directories_to_run "spec/rspec/core/resources"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
it "loads files in Windows", :if => RSpec.world.windows_os? do
assign_files_or_directories_to_run "C:\\path\\to\\project\\spec\\sub\\foo_spec.rb"
expect(config.files_to_run).to eq(["C:/path/to/project/spec/sub/foo_spec.rb"])
end
it "loads files in Windows when directory is specified", :if => RSpec.world.windows_os? do
assign_files_or_directories_to_run "spec\\rspec\\core\\resources"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
end
context "with default default_path" do
it "loads files in the default path when run by rspec" do
allow(config).to receive(:command) { 'rspec' }
assign_files_or_directories_to_run []
expect(config.files_to_run).not_to be_empty
end
it "loads files in the default path when run with DRB (e.g., spork)" do
allow(config).to receive(:command) { 'spork' }
allow(RSpec::Core::Runner).to receive(:running_in_drb?) { true }
assign_files_or_directories_to_run []
expect(config.files_to_run).not_to be_empty
end
it "does not load files in the default path when run by ruby" do
allow(config).to receive(:command) { 'ruby' }
assign_files_or_directories_to_run []
expect(config.files_to_run).to be_empty
end
end
def specify_consistent_ordering_of_files_to_run
allow(File).to receive(:directory?).with('a') { true }
globbed_files = nil
allow(Dir).to receive(:[]).with(/^\{?a/) { globbed_files }
orderings = [
%w[ a/1.rb a/2.rb a/3.rb ],
%w[ a/2.rb a/1.rb a/3.rb ],
%w[ a/3.rb a/2.rb a/1.rb ]
].map do |files|
globbed_files = files
yield
config.files_to_run
end
expect(orderings.uniq.size).to eq(1)
end
context 'when the given directories match the pattern' do
it 'orders the files in a consistent ordering, regardless of the underlying OS ordering' do
specify_consistent_ordering_of_files_to_run do
config.pattern = 'a/*.rb'
assign_files_or_directories_to_run 'a'
end
end
end
context 'when the pattern is given relative to the given directories' do
it 'orders the files in a consistent ordering, regardless of the underlying OS ordering' do
specify_consistent_ordering_of_files_to_run do
config.pattern = '*.rb'
assign_files_or_directories_to_run 'a'
end
end
end
context 'when given multiple file paths' do
it 'orders the files in a consistent ordering, regardless of the given order' do
allow(File).to receive(:directory?) { false } # fake it into thinking these a full file paths
files = ['a/b/c_spec.rb', 'c/b/a_spec.rb']
assign_files_or_directories_to_run(*files)
ordering_1 = config.files_to_run
assign_files_or_directories_to_run(*files.reverse)
ordering_2 = config.files_to_run
expect(ordering_1).to eq(ordering_2)
end
end
end
describe "#pattern" do
context "with single pattern" do
before { config.pattern = "**/*_foo.rb" }
it "loads files following pattern" do
file = File.expand_path(File.dirname(__FILE__) + "/resources/a_foo.rb")
assign_files_or_directories_to_run file
expect(config.files_to_run).to include(file)
end
it "loads files in directories following pattern" do
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
end
it "does not load files in directories not following pattern" do
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).not_to include("#{dir}/a_bar.rb")
end
end
context "with multiple patterns" do
it "supports comma separated values" do
config.pattern = "**/*_foo.rb,**/*_bar.rb"
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
it "supports comma separated values with spaces" do
config.pattern = "**/*_foo.rb, **/*_bar.rb"
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
it "supports curly braces glob syntax" do
config.pattern = "**/*_{foo,bar}.rb"
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
end
context "after files have already been loaded" do
it 'will warn that it will have no effect' do
expect_warning_with_call_site(__FILE__, __LINE__ + 2, /has no effect/)
config.load_spec_files
config.pattern = "rspec/**/*.spec"
end
it 'will not warn if reset is called after load_spec_files' do
config.load_spec_files
config.reset
expect(RSpec).to_not receive(:warning)
config.pattern = "rspec/**/*.spec"
end
end
end
describe "path with line number" do
it "assigns the line number as a location filter" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37]}})
end
end
context "with full_description set" do
it "overrides filters" do
config.filter_run :focused => true
config.full_description = "foo"
expect(inclusion_filter).not_to have_key(:focused)
end
it 'is possible to access the full description regular expression' do
config.full_description = "foo"
expect(config.full_description).to eq(/foo/)
end
end
context "without full_description having been set" do
it 'returns nil from #full_description' do
expect(config.full_description).to eq nil
end
end
context "with line number" do
it "assigns the file and line number as a location filter" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37]}})
end
it "assigns multiple files with line numbers as location filters" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37", "other_spec.rb:44"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37],
File.expand_path("other_spec.rb") => [44]}})
end
it "assigns files with multiple line numbers as location filters" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37", "path/to/a_spec.rb:44"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37, 44]}})
end
end
context "with multiple line numbers" do
it "assigns the file and line numbers as a location filter" do
assign_files_or_directories_to_run "path/to/a_spec.rb:1:3:5:7"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [1,3,5,7]}})
end
end
it "assigns the example name as the filter on description" do
config.full_description = "foo"
expect(inclusion_filter).to eq({:full_description => /foo/})
end
it "assigns the example names as the filter on description if description is an array" do
config.full_description = [ "foo", "bar" ]
expect(inclusion_filter).to eq({:full_description => Regexp.union(/foo/, /bar/)})
end
it 'is possible to access the full description regular expression' do
config.full_description = "foo","bar"
expect(config.full_description).to eq Regexp.union(/foo/,/bar/)
end
describe "#default_path" do
it 'defaults to "spec"' do
expect(config.default_path).to eq('spec')
end
end
describe "#include" do
module InstanceLevelMethods
def you_call_this_a_blt?
"egad man, where's the mayo?!?!?"
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.include(InstanceLevelMethods, *args)
config.include_or_extend_modules.last.last
end
end
context "with no filter" do
it "includes the given module into each example group" do
RSpec.configure do |c|
c.include(InstanceLevelMethods)
end
group = ExampleGroup.describe('does like, stuff and junk', :magic_key => :include) { }
expect(group).not_to respond_to(:you_call_this_a_blt?)
expect(group.new.you_call_this_a_blt?).to eq("egad man, where's the mayo?!?!?")
end
end
context "with a filter" do
it "includes the given module into each matching example group" do
RSpec.configure do |c|
c.include(InstanceLevelMethods, :magic_key => :include)
end
group = ExampleGroup.describe('does like, stuff and junk', :magic_key => :include) { }
expect(group).not_to respond_to(:you_call_this_a_blt?)
expect(group.new.you_call_this_a_blt?).to eq("egad man, where's the mayo?!?!?")
end
end
end
describe "#extend" do
module ThatThingISentYou
def that_thing
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.extend(ThatThingISentYou, *args)
config.include_or_extend_modules.last.last
end
end
it "extends the given module into each matching example group" do
RSpec.configure do |c|
c.extend(ThatThingISentYou, :magic_key => :extend)
end
group = ExampleGroup.describe(ThatThingISentYou, :magic_key => :extend) { }
expect(group).to respond_to(:that_thing)
end
end
describe "#run_all_when_everything_filtered?" do
it "defaults to false" do
expect(config.run_all_when_everything_filtered?).to be_falsey
end
it "can be queried with question method" do
config.run_all_when_everything_filtered = true
expect(config.run_all_when_everything_filtered?).to be_truthy
end
end
describe "#color=" do
context "given true" do
before { config.color = true }
context "with config.tty? and output.tty?" do
it "sets color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = true
allow(config.output_stream).to receive_messages :tty? => true
expect(config.color_enabled?).to be_truthy
expect(config.color_enabled?(output)).to be_truthy
end
end
context "with config.tty? and !output.tty?" do
it "sets color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = true
allow(config.output_stream).to receive_messages :tty? => false
expect(config.color_enabled?).to be_truthy
expect(config.color_enabled?(output)).to be_truthy
end
end
context "with config.tty? and !output.tty?" do
it "does not set color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = false
allow(config.output_stream).to receive_messages :tty? => true
expect(config.color_enabled?).to be_truthy
expect(config.color_enabled?(output)).to be_truthy
end
end
context "with !config.tty? and !output.tty?" do
it "does not set color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = false
allow(config.output_stream).to receive_messages :tty? => false
expect(config.color_enabled?).to be_falsey
expect(config.color_enabled?(output)).to be_falsey
end
end
context "on windows" do
before do
@original_host = RbConfig::CONFIG['host_os']
RbConfig::CONFIG['host_os'] = 'mingw'
allow(config).to receive(:require)
end
after do
RbConfig::CONFIG['host_os'] = @original_host
end
context "with ANSICON available" do
around(:each) { |e| with_env_vars('ANSICON' => 'ANSICON', &e) }
it "enables colors" do
config.output_stream = StringIO.new
allow(config.output_stream).to receive_messages :tty? => true
config.color = true
expect(config.color).to be_truthy
end
it "leaves output stream intact" do
config.output_stream = $stdout
allow(config).to receive(:require) do |what|
config.output_stream = 'foo' if what =~ /Win32/
end
config.color = true
expect(config.output_stream).to eq($stdout)
end
end
context "with ANSICON NOT available" do
before do
allow_warning
end
it "warns to install ANSICON" do
allow(config).to receive(:require) { raise LoadError }
expect_warning_with_call_site(__FILE__, __LINE__ + 1, /You must use ANSICON/)
config.color = true
end
it "sets color to false" do
allow(config).to receive(:require) { raise LoadError }
config.color = true
expect(config.color).to be_falsey
end
end
end
end
it "prefers incoming cli_args" do
config.output_stream = StringIO.new
allow(config.output_stream).to receive_messages :tty? => true
config.force :color => true
config.color = false
expect(config.color).to be_truthy
end
end
%w[formatter= add_formatter].each do |config_method|
describe "##{config_method}" do
it "delegates to formatters#add" do
expect(config.formatter_loader).to receive(:add).with('these','options')
config.send(config_method,'these','options')
end
end
end
describe "#default_formatter" do
it 'defaults to `progress`' do
expect(config.default_formatter).to eq('progress')
end
it 'remembers changes' do
config.default_formatter = 'doc'
expect(config.default_formatter).to eq('doc')
end
context 'when another formatter has been set' do
it 'does not get used' do
config.default_formatter = 'doc'
config.add_formatter 'progress'
expect(used_formatters).to include(an_instance_of Formatters::ProgressFormatter)
expect(used_formatters).not_to include(an_instance_of Formatters::DocumentationFormatter)
end
end
context 'when no other formatter has been set' do
it 'gets used' do
config.default_formatter = 'doc'
expect(used_formatters).not_to include(an_instance_of Formatters::ProgressFormatter)
expect(used_formatters).to include(an_instance_of Formatters::DocumentationFormatter)
end
end
def used_formatters
config.reporter # to force freezing of formatters
config.formatters
end
end
describe "#filter_run_including" do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.filter_run_including(*args)
config.inclusion_filter.rules
end
end
it "sets the filter with a hash" do
config.filter_run_including :foo => true
expect(inclusion_filter).to eq( {:foo => true} )
end
it "sets the filter with a symbol" do
config.filter_run_including :foo
expect(inclusion_filter).to eq( {:foo => true} )
end
it "merges with existing filters" do
config.filter_run_including :foo => true
config.filter_run_including :bar => false
expect(inclusion_filter).to eq( {:foo => true, :bar => false} )
end
end
describe "#filter_run_excluding" do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.filter_run_excluding(*args)
config.exclusion_filter.rules
end
end
it "sets the filter" do
config.filter_run_excluding :foo => true
expect(exclusion_filter).to eq( {:foo => true} )
end
it "sets the filter using a symbol" do
config.filter_run_excluding :foo
expect(exclusion_filter).to eq( {:foo => true} )
end
it "merges with existing filters" do
config.filter_run_excluding :foo => true
config.filter_run_excluding :bar => false
expect(exclusion_filter).to eq( {:foo => true, :bar => false} )
end
end
describe "#inclusion_filter" do
it "returns {} even if set to nil" do
config.inclusion_filter = nil
expect(inclusion_filter).to eq({})
end
end
describe "#inclusion_filter=" do
it "treats symbols as hash keys with true values when told to" do
config.inclusion_filter = :foo
expect(inclusion_filter).to eq( {:foo => true} )
end
it "overrides any inclusion filters set on the command line or in configuration files" do
config.force(:inclusion_filter => { :foo => :bar })
config.inclusion_filter = {:want => :this}
expect(inclusion_filter).to eq( {:want => :this} )
end
end
describe "#exclusion_filter" do
it "returns {} even if set to nil" do
config.exclusion_filter = nil
expect(exclusion_filter).to eq( {} )
end
describe "the default :if filter" do
it "does not exclude a spec with { :if => true } metadata" do
expect(config.exclusion_filter[:if].call(true)).to be_falsey
end
it "excludes a spec with { :if => false } metadata" do
expect(config.exclusion_filter[:if].call(false)).to be_truthy
end
it "excludes a spec with { :if => nil } metadata" do
expect(config.exclusion_filter[:if].call(nil)).to be_truthy
end
end
describe "the default :unless filter" do
it "excludes a spec with { :unless => true } metadata" do
expect(config.exclusion_filter[:unless].call(true)).to be_truthy
end
it "does not exclude a spec with { :unless => false } metadata" do
expect(config.exclusion_filter[:unless].call(false)).to be_falsey
end
it "does not exclude a spec with { :unless => nil } metadata" do
expect(config.exclusion_filter[:unless].call(nil)).to be_falsey
end
end
end
describe "#treat_symbols_as_metadata_keys_with_true_values=" do
it 'is deprecated' do
expect_deprecation_with_call_site(__FILE__, __LINE__ + 1)
config.treat_symbols_as_metadata_keys_with_true_values = true
end
end
describe "#exclusion_filter=" do
it "treats symbols as hash keys with true values when told to" do
config.exclusion_filter = :foo
expect(exclusion_filter).to eq({:foo => true})
end
it "overrides any exclusion filters set on the command line or in configuration files" do
config.force(:exclusion_filter => { :foo => :bar })
config.exclusion_filter = {:want => :this}
expect(exclusion_filter).to eq({:want => :this})
end
end
describe "#full_backtrace=" do
it "doesn't impact other instances of config" do
config_1 = Configuration.new
config_2 = Configuration.new
config_1.full_backtrace = true
expect(config_2.full_backtrace?).to be_falsey
end
end
describe "#backtrace_exclusion_patterns=" do
it "actually receives the new filter values" do
config = Configuration.new
config.backtrace_exclusion_patterns = [/.*/]
expect(config.backtrace_formatter.exclude? "this").to be_truthy
end
end
describe 'full_backtrace' do
it 'returns true when backtrace patterns is empty' do
config.backtrace_exclusion_patterns = []
expect(config.full_backtrace?).to eq true
end
it 'returns false when backtrace patterns are not empty' do
config.backtrace_exclusion_patterns = [:lib]
expect(config.full_backtrace?).to eq false
end
end
describe "#backtrace_exclusion_patterns" do
it "can be appended to" do
config = Configuration.new
config.backtrace_exclusion_patterns << /.*/
expect(config.backtrace_formatter.exclude? "this").to be_truthy
end
end
describe "#libs=" do
include_context "isolate load path mutation"
it "adds directories to the LOAD_PATH" do
expect($LOAD_PATH).to receive(:unshift).with("a/dir")
config.libs = ["a/dir"]
end
end
describe "libs" do
include_context "isolate load path mutation"
it 'records paths added to the load path' do
config.libs = ["a/dir"]
expect(config.libs).to eq ["a/dir"]
end
end
describe "#add_setting" do
describe "with no modifiers" do
context "with no additional options" do
before do
config.add_setting :custom_option
end
it "defaults to nil" do
expect(config.custom_option).to be_nil
end
it "adds a predicate" do
expect(config.custom_option?).to be_falsey
end
it "can be overridden" do
config.custom_option = "a value"
expect(config.custom_option).to eq("a value")
end
end
context "with :default => 'a value'" do
before do
config.add_setting :custom_option, :default => 'a value'
end
it "defaults to 'a value'" do
expect(config.custom_option).to eq("a value")
end
it "returns true for the predicate" do
expect(config.custom_option?).to be_truthy
end
it "can be overridden with a truthy value" do
config.custom_option = "a new value"
expect(config.custom_option).to eq("a new value")
end
it "can be overridden with nil" do
config.custom_option = nil
expect(config.custom_option).to eq(nil)
end
it "can be overridden with false" do
config.custom_option = false
expect(config.custom_option).to eq(false)
end
end
end
context "with :alias_with => " do
before do
config.add_setting :custom_option, :alias_with => :another_custom_option
end
it "delegates the getter to the other option" do
config.another_custom_option = "this value"
expect(config.custom_option).to eq("this value")
end
it "delegates the setter to the other option" do
config.custom_option = "this value"
expect(config.another_custom_option).to eq("this value")
end
it "delegates the predicate to the other option" do
config.custom_option = true
expect(config.another_custom_option?).to be_truthy
end
end
end
describe "#configure_group" do
it "extends with 'extend'" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.extend(mod, :foo => :bar)
config.configure_group(group)
expect(group).to be_a(mod)
end
it "extends with 'module'" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.include(mod, :foo => :bar)
config.configure_group(group)
expect(group.included_modules).to include(mod)
end
it "requires only one matching filter" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.include(mod, :foo => :bar, :baz => :bam)
config.configure_group(group)
expect(group.included_modules).to include(mod)
end
it "includes each one before deciding whether to include the next" do
mod1 = Module.new do
def self.included(host)
host.metadata[:foo] = :bar
end
end
mod2 = Module.new
group = ExampleGroup.describe("group")
config.include(mod1)
config.include(mod2, :foo => :bar)
config.configure_group(group)
expect(group.included_modules).to include(mod1)
expect(group.included_modules).to include(mod2)
end
module IncludeOrExtendMeOnce
def self.included(host)
raise "included again" if host.instance_methods.include?(:foobar)
host.class_exec { def foobar; end }
end
def self.extended(host)
raise "extended again" if host.respond_to?(:foobar)
def host.foobar; end
end
end
it "doesn't include a module when already included in ancestor" do
config.include(IncludeOrExtendMeOnce, :foo => :bar)
group = ExampleGroup.describe("group", :foo => :bar)
child = group.describe("child")
config.configure_group(group)
config.configure_group(child)
end
it "doesn't extend when ancestor is already extended with same module" do
config.extend(IncludeOrExtendMeOnce, :foo => :bar)
group = ExampleGroup.describe("group", :foo => :bar)
child = group.describe("child")
config.configure_group(group)
config.configure_group(child)
end
end
describe "#alias_example_group_to" do
after do
RSpec::Core::DSL.example_group_aliases.delete(:my_group_method)
RSpec.module_exec do
class << self
undef :my_group_method if method_defined? :my_group_method
end
end
RSpec::Core::ExampleGroup.module_exec do
class << self
undef :my_group_method if method_defined? :my_group_method
end
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.alias_example_group_to :my_group_method, *args
group = ExampleGroup.my_group_method("a group")
group.metadata
end
end
it "allows adding additional metadata" do
config.alias_example_group_to :my_group_method, { :some => "thing" }
group = ExampleGroup.my_group_method("a group", :another => "thing")
expect(group.metadata).to include(:some => "thing", :another => "thing")
end
it "passes `nil` as the description arg when no args are given" do
config.alias_example_group_to :my_group_method, { :some => "thing" }
group = ExampleGroup.my_group_method
expect(group.metadata).to include(
:description_args => [nil],
:description => "",
:some => "thing"
)
end
context 'when the aliased method is used' do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.alias_example_group_to :my_group_method
group = ExampleGroup.my_group_method("a group", *args)
group.metadata
end
end
end
end
describe "#alias_example_to" do
it_behaves_like "metadata hash builder" do
after do
RSpec::Core::ExampleGroup.module_exec do
class << self
undef :my_example_method if method_defined? :my_example_method
end
end
end
def metadata_hash(*args)
config.alias_example_to :my_example_method, *args
group = ExampleGroup.describe("group")
example = group.my_example_method("description")
example.metadata
end
end
end
describe "#reset" do
it "clears the reporter" do
expect(config.reporter).not_to be_nil
config.reset
expect(config.instance_variable_get("@reporter")).to be_nil
end
it "clears the formatters" do
config.add_formatter "doc"
config.reset
expect(config.formatters).to be_empty
end
end
describe "#force" do
context "for ordering options" do
let(:list) { [1, 2, 3, 4] }
let(:ordering_strategy) { config.ordering_registry.fetch(:global) }
let(:rng) { RSpec::Core::RandomNumberGenerator.new config.seed }
let(:shuffled) { Ordering::Random.new(config).shuffle list, rng }
specify "CLI `--order defined` takes precedence over `config.order = rand`" do
config.force :order => "defined"
config.order = "rand"
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
specify "CLI `--order rand:37` takes precedence over `config.order = defined`" do
config.force :order => "rand:37"
config.order = "defined"
expect(ordering_strategy.order(list)).to eq(shuffled)
end
specify "CLI `--seed 37` forces order and seed" do
config.force :seed => 37
config.order = "defined"
config.seed = 145
expect(ordering_strategy.order(list)).to eq(shuffled)
expect(config.seed).to eq(37)
end
specify "CLI `--order defined` takes precedence over `config.register_ordering(:global)`" do
config.force :order => "defined"
config.register_ordering(:global, &:reverse)
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
end
it "forces 'false' value" do
config.add_setting :custom_option
config.custom_option = true
expect(config.custom_option?).to be_truthy
config.force :custom_option => false
expect(config.custom_option?).to be_falsey
config.custom_option = true
expect(config.custom_option?).to be_falsey
end
end
describe '#seed' do
it 'returns the seed as an int' do
config.seed = '123'
expect(config.seed).to eq(123)
end
end
describe "#seed_used?" do
def use_seed_on(registry)
registry.fetch(:random).order([1, 2])
end
it 'returns false if neither ordering registry used the seed' do
expect(config.seed_used?).to be false
end
it 'returns true if the ordering registry used the seed' do
use_seed_on(config.ordering_registry)
expect(config.seed_used?).to be true
end
end
describe '#order=' do
context 'given "random"' do
before do
config.seed = 7654
config.order = 'random'
end
it 'does not change the seed' do
expect(config.seed).to eq(7654)
end
it 'sets up random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
global_ordering = config.ordering_registry.fetch(:global)
expect(global_ordering).to be_an_instance_of(Ordering::Random)
end
end
context 'given "random:123"' do
before { config.order = 'random:123' }
it 'sets seed to 123' do
expect(config.seed).to eq(123)
end
it 'sets up random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
global_ordering = config.ordering_registry.fetch(:global)
expect(global_ordering).to be_an_instance_of(Ordering::Random)
end
end
context 'given "defined"' do
before do
config.order = 'rand:123'
config.order = 'defined'
end
it "does not change the seed" do
expect(config.seed).to eq(123)
end
it 'clears the random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
list = [1, 2, 3, 4]
ordering_strategy = config.ordering_registry.fetch(:global)
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
end
end
describe "#register_ordering" do
def register_reverse_ordering
config.register_ordering(:reverse, &:reverse)
end
it 'stores the ordering for later use' do
register_reverse_ordering
list = [1, 2, 3]
strategy = config.ordering_registry.fetch(:reverse)
expect(strategy).to be_a(Ordering::Custom)
expect(strategy.order(list)).to eq([3, 2, 1])
end
it 'can register an ordering object' do
strategy = Object.new
def strategy.order(list)
list.reverse
end
config.register_ordering(:reverse, strategy)
list = [1, 2, 3]
fetched = config.ordering_registry.fetch(:reverse)
expect(fetched).to be(strategy)
expect(fetched.order(list)).to eq([3, 2, 1])
end
end
describe '#warnings' do
around do |example|
original_setting = $VERBOSE
example.run
$VERBOSE = original_setting
end
it "sets verbose to true when true" do
config.warnings = true
expect($VERBOSE).to eq true
end
it "sets verbose to false when true" do
config.warnings = false
expect($VERBOSE).to eq false
end
it 'returns the verbosity setting' do
config.warnings = true
expect(config.warnings?).to eq true
config.warnings = false
expect(config.warnings?).to eq false
end
it 'is loaded from config by #force' do
config.force :warnings => true
expect($VERBOSE).to eq true
end
end
describe "#raise_errors_for_deprecations!" do
it 'causes deprecations to raise errors rather than printing to the deprecation stream' do
config.deprecation_stream = stream = StringIO.new
config.raise_errors_for_deprecations!
expect {
config.reporter.deprecation(:deprecated => "foo", :call_site => "foo.rb:1")
}.to raise_error(RSpec::Core::DeprecationError, /foo is deprecated/)
expect(stream.string).to eq("")
end
end
describe "#expose_current_running_example_as" do
before { stub_const(Configuration::ExposeCurrentExample.name, Module.new) }
it 'exposes the current example via the named method' do
RSpec.configuration.expose_current_running_example_as :the_example
RSpec.configuration.expose_current_running_example_as :another_example_helper
value_1 = value_2 = nil
ExampleGroup.describe "Group" do
it "works" do
value_1 = the_example
value_2 = another_example_helper
end
end.run
expect(value_1).to be_an(RSpec::Core::Example)
expect(value_1.description).to eq("works")
expect(value_2).to be(value_1)
end
end
# assigns files_or_directories_to_run and triggers post-processing
# via `files_to_run`.
def assign_files_or_directories_to_run(*value)
config.files_or_directories_to_run = value
config.files_to_run
end
end
end
| 1 | 12,268 | @JonRowe in what scenario do you see this being manually set? | rspec-rspec-core | rb |
@@ -307,6 +307,10 @@ void nano::block_processor::process_live (nano::block_hash const & hash_a, std::
election.election->try_generate_votes (block_a->hash ());
}
}
+ else
+ {
+ node.active.check_inactive_votes_cache_election (block_a);
+ }
// Announce block contents to the network
if (origin_a == nano::block_origin::local)
| 1 |
#include <nano/lib/threading.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/blockprocessor.hpp>
#include <nano/node/election.hpp>
#include <nano/node/node.hpp>
#include <nano/node/websocket.hpp>
#include <nano/secure/blockstore.hpp>
#include <boost/format.hpp>
std::chrono::milliseconds constexpr nano::block_processor::confirmation_request_delay;
nano::block_post_events::~block_post_events ()
{
for (auto const & i : events)
{
i ();
}
}
nano::block_processor::block_processor (nano::node & node_a, nano::write_database_queue & write_database_queue_a) :
next_log (std::chrono::steady_clock::now ()),
node (node_a),
write_database_queue (write_database_queue_a),
state_block_signature_verification (node.checker, node.ledger.network_params.ledger.epochs, node.config, node.logger, node.flags.block_processor_verification_size)
{
state_block_signature_verification.blocks_verified_callback = [this](std::deque<nano::unchecked_info> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures) {
this->process_verified_state_blocks (items, verifications, hashes, blocks_signatures);
};
state_block_signature_verification.transition_inactive_callback = [this]() {
if (this->flushing)
{
{
// Prevent a race with condition.wait in block_processor::flush
nano::lock_guard<std::mutex> guard (this->mutex);
}
this->condition.notify_all ();
}
};
}
nano::block_processor::~block_processor ()
{
stop ();
}
void nano::block_processor::stop ()
{
{
nano::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
state_block_signature_verification.stop ();
}
void nano::block_processor::flush ()
{
node.checker.flush ();
flushing = true;
nano::unique_lock<std::mutex> lock (mutex);
while (!stopped && (have_blocks () || active || state_block_signature_verification.is_active ()))
{
condition.wait (lock);
}
flushing = false;
}
size_t nano::block_processor::size ()
{
nano::unique_lock<std::mutex> lock (mutex);
return (blocks.size () + state_block_signature_verification.size () + forced.size ());
}
bool nano::block_processor::full ()
{
return size () >= node.flags.block_processor_full_size;
}
bool nano::block_processor::half_full ()
{
return size () >= node.flags.block_processor_full_size / 2;
}
void nano::block_processor::add (std::shared_ptr<nano::block> block_a, uint64_t origination)
{
nano::unchecked_info info (block_a, 0, origination, nano::signature_verification::unknown);
add (info);
}
void nano::block_processor::add (nano::unchecked_info const & info_a, const bool push_front_preference_a)
{
debug_assert (!nano::work_validate_entry (*info_a.block));
bool quarter_full (size () > node.flags.block_processor_full_size / 4);
if (info_a.verified == nano::signature_verification::unknown && (info_a.block->type () == nano::block_type::state || info_a.block->type () == nano::block_type::open || !info_a.account.is_zero ()))
{
state_block_signature_verification.add (info_a);
}
else if (push_front_preference_a && !quarter_full)
{
/* Push blocks from unchecked to the front of the processing deque to keep more unchecked operations inside a single write transaction.
This is designed to help with realtime block traffic while the block processor is not performing a large task such as bootstrap.
If the deque is a quarter full, push to the back instead to allow other blocks to be processed. */
{
nano::lock_guard<std::mutex> guard (mutex);
blocks.push_front (info_a);
}
condition.notify_all ();
}
else
{
{
nano::lock_guard<std::mutex> guard (mutex);
blocks.push_back (info_a);
}
condition.notify_all ();
}
}
void nano::block_processor::force (std::shared_ptr<nano::block> block_a)
{
{
nano::lock_guard<std::mutex> lock (mutex);
forced.push_back (block_a);
}
condition.notify_all ();
}
void nano::block_processor::wait_write ()
{
nano::lock_guard<std::mutex> lock (mutex);
awaiting_write = true;
}
void nano::block_processor::process_blocks ()
{
nano::unique_lock<std::mutex> lock (mutex);
while (!stopped)
{
if (!blocks.empty () || !forced.empty ())
{
active = true;
lock.unlock ();
process_batch (lock);
lock.lock ();
active = false;
}
else
{
condition.notify_one ();
condition.wait (lock);
}
}
}
bool nano::block_processor::should_log ()
{
auto result (false);
auto now (std::chrono::steady_clock::now ());
if (next_log < now)
{
next_log = now + (node.config.logging.timing_logging () ? std::chrono::seconds (2) : std::chrono::seconds (15));
result = true;
}
return result;
}
bool nano::block_processor::have_blocks ()
{
debug_assert (!mutex.try_lock ());
return !blocks.empty () || !forced.empty () || state_block_signature_verification.size () != 0;
}
void nano::block_processor::process_verified_state_blocks (std::deque<nano::unchecked_info> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures)
{
{
nano::unique_lock<std::mutex> lk (mutex);
for (auto i (0); i < verifications.size (); ++i)
{
debug_assert (verifications[i] == 1 || verifications[i] == 0);
auto & item (items.front ());
if (!item.block->link ().is_zero () && node.ledger.is_epoch_link (item.block->link ()))
{
// Epoch blocks
if (verifications[i] == 1)
{
item.verified = nano::signature_verification::valid_epoch;
blocks.push_back (std::move (item));
}
else
{
// Possible regular state blocks with epoch link (send subtype)
item.verified = nano::signature_verification::unknown;
blocks.push_back (std::move (item));
}
}
else if (verifications[i] == 1)
{
// Non epoch blocks
item.verified = nano::signature_verification::valid;
blocks.push_back (std::move (item));
}
else
{
requeue_invalid (hashes[i], item);
}
items.pop_front ();
}
}
condition.notify_all ();
}
void nano::block_processor::process_batch (nano::unique_lock<std::mutex> & lock_a)
{
auto scoped_write_guard = write_database_queue.wait (nano::writer::process_batch);
block_post_events post_events;
auto transaction (node.store.tx_begin_write ({ tables::accounts, tables::blocks, tables::frontiers, tables::pending, tables::unchecked }, { tables::confirmation_height }));
nano::timer<std::chrono::milliseconds> timer_l;
lock_a.lock ();
timer_l.start ();
// Processing blocks
unsigned number_of_blocks_processed (0), number_of_forced_processed (0);
while ((!blocks.empty () || !forced.empty ()) && (timer_l.before_deadline (node.config.block_processor_batch_max_time) || (number_of_blocks_processed < node.flags.block_processor_batch_size)) && !awaiting_write)
{
if ((blocks.size () + state_block_signature_verification.size () + forced.size () > 64) && should_log ())
{
node.logger.always_log (boost::str (boost::format ("%1% blocks (+ %2% state blocks) (+ %3% forced) in processing queue") % blocks.size () % state_block_signature_verification.size () % forced.size ()));
}
nano::unchecked_info info;
nano::block_hash hash (0);
bool force (false);
if (forced.empty ())
{
info = blocks.front ();
blocks.pop_front ();
hash = info.block->hash ();
}
else
{
info = nano::unchecked_info (forced.front (), 0, nano::seconds_since_epoch (), nano::signature_verification::unknown);
forced.pop_front ();
hash = info.block->hash ();
force = true;
number_of_forced_processed++;
}
lock_a.unlock ();
if (force)
{
auto successor (node.ledger.successor (transaction, info.block->qualified_root ()));
if (successor != nullptr && successor->hash () != hash)
{
// Replace our block with the winner and roll back any dependent blocks
node.logger.always_log (boost::str (boost::format ("Rolling back %1% and replacing with %2%") % successor->hash ().to_string () % hash.to_string ()));
std::vector<std::shared_ptr<nano::block>> rollback_list;
if (node.ledger.rollback (transaction, successor->hash (), rollback_list))
{
node.logger.always_log (nano::severity_level::error, boost::str (boost::format ("Failed to roll back %1% because it or a successor was confirmed") % successor->hash ().to_string ()));
}
else
{
node.logger.always_log (boost::str (boost::format ("%1% blocks rolled back") % rollback_list.size ()));
}
// Deleting from votes cache & wallet work watcher, stop active transaction
for (auto & i : rollback_list)
{
node.history.erase (i->root ());
node.wallets.watcher->remove (*i);
// Stop all rolled back active transactions except initial
if (i->hash () != successor->hash ())
{
node.active.erase (*i);
}
}
}
}
number_of_blocks_processed++;
process_one (transaction, post_events, info);
lock_a.lock ();
}
awaiting_write = false;
lock_a.unlock ();
if (node.config.logging.timing_logging () && number_of_blocks_processed != 0 && timer_l.stop () > std::chrono::milliseconds (100))
{
node.logger.always_log (boost::str (boost::format ("Processed %1% blocks (%2% blocks were forced) in %3% %4%") % number_of_blocks_processed % number_of_forced_processed % timer_l.value ().count () % timer_l.unit ()));
}
}
void nano::block_processor::process_live (nano::block_hash const & hash_a, std::shared_ptr<nano::block> block_a, nano::process_return const & process_return_a, const bool watch_work_a, nano::block_origin const origin_a)
{
// Add to work watcher to prevent dropping the election
if (watch_work_a)
{
node.wallets.watcher->add (block_a);
}
// Start collecting quorum on block
if (watch_work_a || node.ledger.can_vote (node.store.tx_begin_read (), *block_a))
{
auto election = node.active.insert (block_a, process_return_a.previous_balance.number ());
if (election.inserted)
{
election.election->transition_passive ();
}
else if (election.election)
{
election.election->try_generate_votes (block_a->hash ());
}
}
// Announce block contents to the network
if (origin_a == nano::block_origin::local)
{
node.network.flood_block_initial (block_a);
}
else if (!node.flags.disable_block_processor_republishing)
{
node.network.flood_block (block_a, nano::buffer_drop_policy::no_limiter_drop);
}
if (node.websocket_server && node.websocket_server->any_subscriber (nano::websocket::topic::new_unconfirmed_block))
{
node.websocket_server->broadcast (nano::websocket::message_builder ().new_block_arrived (*block_a));
}
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, nano::unchecked_info info_a, const bool watch_work_a, nano::block_origin const origin_a)
{
nano::process_return result;
auto block (info_a.block);
auto hash (block->hash ());
result = node.ledger.process (transaction_a, *block, info_a.verified);
switch (result.code)
{
case nano::process_result::progress:
{
release_assert (info_a.account.is_zero () || info_a.account == node.store.block_account_calculated (*block));
if (node.config.logging.ledger_logging ())
{
std::string block_string;
block->serialize_json (block_string, node.config.logging.single_line_record ());
node.logger.try_log (boost::str (boost::format ("Processing block %1%: %2%") % hash.to_string () % block_string));
}
if (info_a.modified > nano::seconds_since_epoch () - 300 && node.block_arrival.recent (hash))
{
events_a.events.emplace_back ([this, hash, block, result, watch_work_a, origin_a]() { process_live (hash, block, result, watch_work_a, origin_a); });
}
queue_unchecked (transaction_a, hash);
break;
}
case nano::process_result::gap_previous:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap previous for: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (block->previous (), hash);
node.store.unchecked_put (transaction_a, unchecked_key, info_a);
node.gap_cache.add (hash);
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_previous);
break;
}
case nano::process_result::gap_source:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap source for: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (node.ledger.block_source (transaction_a, *(block)), hash);
node.store.unchecked_put (transaction_a, unchecked_key, info_a);
node.gap_cache.add (hash);
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_source);
break;
}
case nano::process_result::old:
{
if (node.config.logging.ledger_duplicate_logging ())
{
node.logger.try_log (boost::str (boost::format ("Old for: %1%") % hash.to_string ()));
}
process_old (transaction_a, block, origin_a);
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::old);
break;
}
case nano::process_result::bad_signature:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Bad signature for: %1%") % hash.to_string ()));
}
requeue_invalid (hash, info_a);
break;
}
case nano::process_result::negative_spend:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Negative spend for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::unreceivable:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Unreceivable for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::fork:
{
node.process_fork (transaction_a, block, info_a.modified);
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::fork);
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Fork for: %1% root: %2%") % hash.to_string () % block->root ().to_string ()));
}
break;
}
case nano::process_result::opened_burn_account:
{
node.logger.always_log (boost::str (boost::format ("*** Rejecting open block for burn account ***: %1%") % hash.to_string ()));
break;
}
case nano::process_result::balance_mismatch:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Balance mismatch for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::representative_mismatch:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Representative mismatch for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::block_position:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Block %1% cannot follow predecessor %2%") % hash.to_string () % block->previous ().to_string ()));
}
break;
}
case nano::process_result::insufficient_work:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Insufficient work for %1% : %2% (difficulty %3%)") % hash.to_string () % nano::to_string_hex (block->block_work ()) % nano::to_string_hex (block->difficulty ())));
}
break;
}
}
return result;
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, std::shared_ptr<nano::block> block_a, const bool watch_work_a)
{
nano::unchecked_info info (block_a, block_a->account (), 0, nano::signature_verification::unknown);
auto result (process_one (transaction_a, events_a, info, watch_work_a));
return result;
}
void nano::block_processor::process_old (nano::write_transaction const & transaction_a, std::shared_ptr<nano::block> const & block_a, nano::block_origin const origin_a)
{
// First try to update election difficulty, then attempt to restart an election
if (!node.active.update_difficulty (*block_a) || !node.active.restart (block_a, transaction_a))
{
// Let others know about the difficulty update
if (origin_a == nano::block_origin::local)
{
node.network.flood_block_initial (block_a);
}
}
}
void nano::block_processor::queue_unchecked (nano::write_transaction const & transaction_a, nano::block_hash const & hash_a)
{
auto unchecked_blocks (node.store.unchecked_get (transaction_a, hash_a));
for (auto & info : unchecked_blocks)
{
if (!node.flags.disable_block_processor_unchecked_deletion)
{
node.store.unchecked_del (transaction_a, nano::unchecked_key (hash_a, info.block->hash ()));
}
add (info, true);
}
node.gap_cache.erase (hash_a);
}
void nano::block_processor::requeue_invalid (nano::block_hash const & hash_a, nano::unchecked_info const & info_a)
{
debug_assert (hash_a == info_a.block->hash ());
node.bootstrap_initiator.lazy_requeue (hash_a, info_a.block->previous (), info_a.confirmed);
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_processor & block_processor, const std::string & name)
{
size_t blocks_count;
size_t forced_count;
{
nano::lock_guard<std::mutex> guard (block_processor.mutex);
blocks_count = block_processor.blocks.size ();
forced_count = block_processor.forced.size ();
}
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (collect_container_info (block_processor.state_block_signature_verification, "state_block_signature_verification"));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "blocks", blocks_count, sizeof (decltype (block_processor.blocks)::value_type) }));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "forced", forced_count, sizeof (decltype (block_processor.forced)::value_type) }));
return composite;
}
| 1 | 16,492 | I think the word "trigger" makes more sense than "check" because this is taking an action based on status. | nanocurrency-nano-node | cpp |
@@ -41,7 +41,7 @@ public class AuthHandlerBoot implements BootListener {
RSAKeypair4Auth.INSTANCE.setPrivateKey(rsaKeyPairEntry.getPrivateKey());
RSAKeypair4Auth.INSTANCE.setPublicKey(rsaKeyPairEntry.getPublicKey());
RSAKeypair4Auth.INSTANCE.setPublicKeyEncoded(rsaKeyPairEntry.getPublicKeyEncoded());
- RegistrationManager.INSTANCE.getMicroserviceInstance().getProperties().put(DefinitionConst.INSTANCE_PUBKEY_PRO,
+ RegistrationManager.INSTANCE.getMicroservice().getProperties().put(DefinitionConst.INSTANCE_PUBKEY_PRO,
rsaKeyPairEntry.getPublicKeyEncoded());
}
}
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb;
import org.apache.servicecomb.core.BootListener;
import org.apache.servicecomb.foundation.common.utils.RSAKeyPairEntry;
import org.apache.servicecomb.foundation.common.utils.RSAUtils;
import org.apache.servicecomb.foundation.token.RSAKeypair4Auth;
import org.apache.servicecomb.registry.RegistrationManager;
import org.apache.servicecomb.registry.definition.DefinitionConst;
import org.springframework.stereotype.Component;
/**
*
 * Initializes the RSA public/private key pair at system boot, before the instance is
 * registered to the service center.
*/
@Component
public class AuthHandlerBoot implements BootListener {
@Override
public void onBootEvent(BootEvent event) {
if (EventType.BEFORE_REGISTRY.equals(event.getEventType())) {
RSAKeyPairEntry rsaKeyPairEntry = RSAUtils.generateRSAKeyPair();
RSAKeypair4Auth.INSTANCE.setPrivateKey(rsaKeyPairEntry.getPrivateKey());
RSAKeypair4Auth.INSTANCE.setPublicKey(rsaKeyPairEntry.getPublicKey());
RSAKeypair4Auth.INSTANCE.setPublicKeyEncoded(rsaKeyPairEntry.getPublicKeyEncoded());
RegistrationManager.INSTANCE.getMicroserviceInstance().getProperties().put(DefinitionConst.INSTANCE_PUBKEY_PRO,
rsaKeyPairEntry.getPublicKeyEncoded());
}
}
}
| 1 | 12,441 | Add a configuration option: by default, put the public key in the Microservice properties; if the configuration is set, put it in the Instance properties instead. | apache-servicecomb-java-chassis | java |
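A rough sketch of what that suggestion could look like: route the encoded public key to either the microservice or the instance properties based on a boolean flag. The helper class and the flag are hypothetical (the flag would come from a new, yet-to-be-named configuration item); only calls already present in AuthHandlerBoot are reused.

import java.util.Map;

import org.apache.servicecomb.foundation.common.utils.RSAKeyPairEntry;
import org.apache.servicecomb.registry.RegistrationManager;
import org.apache.servicecomb.registry.definition.DefinitionConst;

// Hypothetical helper: by default publish the key on the Microservice; opt in to Instance-level
// publication via a boolean flag resolved from configuration elsewhere.
class PublicKeyPublisher {
  static void publish(RSAKeyPairEntry rsaKeyPairEntry, boolean registerToInstance) {
    Map<String, String> target = registerToInstance
        ? RegistrationManager.INSTANCE.getMicroserviceInstance().getProperties()
        : RegistrationManager.INSTANCE.getMicroservice().getProperties();
    target.put(DefinitionConst.INSTANCE_PUBKEY_PRO, rsaKeyPairEntry.getPublicKeyEncoded());
  }
}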
@@ -20,8 +20,8 @@ import com.google.common.collect.ImmutableList;
public interface OutputView {
public enum Kind {
- ASSIGNMENT,
COMMENT,
+ DEFINITION,
LOOP,
PRINT
}
| 1 |
/* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
public interface OutputView {
public enum Kind {
ASSIGNMENT,
COMMENT,
LOOP,
PRINT
}
Kind kind();
@AutoValue
abstract class AssignmentView implements OutputView {
public abstract String variableType(); // TODO: Replace with appropriate type type
public abstract String variableName();
public abstract VariableView reference();
public Kind kind() {
return Kind.ASSIGNMENT;
}
public static Builder newBuilder() {
return new AutoValue_OutputView_AssignmentView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder variableType(String val);
public abstract Builder variableName(String val);
public abstract Builder reference(VariableView val);
public abstract AssignmentView build();
}
}
@AutoValue
abstract class CommentView implements OutputView {
public abstract ImmutableList<String> lines();
public Kind kind() {
return Kind.COMMENT;
}
public static Builder newBuilder() {
return new AutoValue_OutputView_CommentView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder lines(ImmutableList<String> val);
public abstract CommentView build();
}
}
@AutoValue
abstract class LoopView implements OutputView {
public abstract String variableType(); // TODO: Replace with appropriate type type
public abstract String variableName();
public abstract VariableView collection();
public abstract ImmutableList<OutputView> body();
public Kind kind() {
return Kind.LOOP;
}
public static Builder newBuilder() {
return new AutoValue_OutputView_LoopView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder variableType(String val);
public abstract Builder variableName(String val);
public abstract Builder collection(VariableView val);
public abstract Builder body(ImmutableList<OutputView> val);
public abstract LoopView build();
}
}
@AutoValue
abstract class PrintView implements OutputView {
public abstract String format();
public abstract ImmutableList<VariableView> args();
public Kind kind() {
return Kind.PRINT;
}
public static Builder newBuilder() {
return new AutoValue_OutputView_PrintView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder format(String val);
public abstract Builder args(ImmutableList<VariableView> val);
public abstract PrintView build();
}
}
@AutoValue
abstract class VariableView {
public abstract String variable();
public abstract ImmutableList<String> accessors();
public static Builder newBuilder() {
return new AutoValue_OutputView_VariableView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder variable(String val);
public abstract Builder accessors(ImmutableList<String> val);
public abstract VariableView build();
}
}
}
| 1 | 25,976 | WDYT about the idea above of making these all verbs that correspond to the input spec? | googleapis-gapic-generator | java |
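For context, a minimal usage sketch of the builders declared in this view model; the variable name, accessor and format string are invented values for illustration, not output of the generator:

import com.google.api.codegen.viewmodel.OutputView;
import com.google.common.collect.ImmutableList;

class OutputViewSample {
  // Builds a PrintView (kind() == Kind.PRINT) that prints `response.getName()`.
  static OutputView samplePrint() {
    OutputView.VariableView arg = OutputView.VariableView.newBuilder()
        .variable("response")                   // invented variable name
        .accessors(ImmutableList.of("getName")) // invented accessor chain
        .build();
    return OutputView.PrintView.newBuilder()
        .format("Name: %s")                     // invented format string
        .args(ImmutableList.of(arg))
        .build();
  }
}

Renaming the Kind constants to verbs, as the comment suggests, would only touch the enum and the kind() overrides; the builders themselves would stay the same.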
@@ -77,7 +77,7 @@ import static com.fsck.k9.mail.K9MailLib.PUSH_WAKE_LOCK_TIMEOUT;
* </pre>
*/
public class ImapStore extends RemoteStore {
- public static final String STORE_TYPE = "IMAP";
+ public static final ServerSettings.Type STORE_TYPE = ServerSettings.Type.IMAP;
private static final int IDLE_READ_TIMEOUT_INCREMENT = 5 * 60 * 1000;
    private static final int IDLE_FAILURE_COUNT_LIMIT = 10;
| 1 |
package com.fsck.k9.mail.store.imap;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.Deque;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import android.net.ConnectivityManager;
import android.os.PowerManager;
import android.text.TextUtils;
import android.util.Log;
import com.fsck.k9.mail.internet.MimeMessageHelper;
import com.fsck.k9.mail.power.TracingPowerManager;
import com.fsck.k9.mail.power.TracingPowerManager.TracingWakeLock;
import com.fsck.k9.mail.AuthType;
import com.fsck.k9.mail.Body;
import com.fsck.k9.mail.ConnectionSecurity;
import com.fsck.k9.mail.FetchProfile;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Folder;
import com.fsck.k9.mail.K9MailLib;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.MessageRetrievalListener;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.Part;
import com.fsck.k9.mail.PushReceiver;
import com.fsck.k9.mail.Pusher;
import com.fsck.k9.mail.ServerSettings;
import com.fsck.k9.mail.filter.EOLConvertingOutputStream;
import com.fsck.k9.mail.filter.FixedLengthInputStream;
import com.fsck.k9.mail.internet.MimeBodyPart;
import com.fsck.k9.mail.internet.MimeHeader;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mail.internet.MimeMultipart;
import com.fsck.k9.mail.internet.MimeUtility;
import com.fsck.k9.mail.ssl.TrustedSocketFactory;
import com.fsck.k9.mail.store.RemoteStore;
import com.fsck.k9.mail.store.StoreConfig;
import com.beetstra.jutf7.CharsetProvider;
import static com.fsck.k9.mail.K9MailLib.LOG_TAG;
import static com.fsck.k9.mail.K9MailLib.PUSH_WAKE_LOCK_TIMEOUT;
/**
* <pre>
* TODO Need to start keeping track of UIDVALIDITY
* TODO Need a default response handler for things like folder updates
* </pre>
*/
public class ImapStore extends RemoteStore {
public static final String STORE_TYPE = "IMAP";
private static final int IDLE_READ_TIMEOUT_INCREMENT = 5 * 60 * 1000;
private static final int IDLE_FAILURE_COUNT_LIMIT = 10;
private static final int MAX_DELAY_TIME = 5 * 60 * 1000; // 5 minutes
private static final int NORMAL_DELAY_TIME = 5000;
private static final String[] EMPTY_STRING_ARRAY = new String[0];
private static final int FETCH_WINDOW_SIZE = 100;
private Set<Flag> mPermanentFlagsIndex = EnumSet.noneOf(Flag.class);
private ConnectivityManager mConnectivityManager;
private String mHost;
private int mPort;
private String mUsername;
private String mPassword;
private String mClientCertificateAlias;
private ConnectionSecurity mConnectionSecurity;
private AuthType mAuthType;
private String mPathPrefix;
private String mCombinedPrefix = null;
private String mPathDelimiter = null;
/**
* Decodes an ImapStore URI.
*
* <p>Possible forms:</p>
* <pre>
* imap://auth:user:password@server:port ConnectionSecurity.NONE
* imap+tls+://auth:user:password@server:port ConnectionSecurity.STARTTLS_REQUIRED
* imap+ssl+://auth:user:password@server:port ConnectionSecurity.SSL_TLS_REQUIRED
* </pre>
*
* @param uri the store uri. NOTE: this method expects the userinfo part of the uri to be
* encoded twice, due to a bug in {@link #createUri(ServerSettings)}.
*/
public static ImapStoreSettings decodeUri(String uri) {
String host;
int port;
ConnectionSecurity connectionSecurity;
AuthType authenticationType = null;
String username = null;
String password = null;
String clientCertificateAlias = null;
String pathPrefix = null;
boolean autoDetectNamespace = true;
URI imapUri;
try {
imapUri = new URI(uri);
} catch (URISyntaxException use) {
throw new IllegalArgumentException("Invalid ImapStore URI", use);
}
String scheme = imapUri.getScheme();
/*
* Currently available schemes are:
* imap
* imap+tls+
* imap+ssl+
*
* The following are obsolete schemes that may be found in pre-existing
* settings from earlier versions or that may be found when imported. We
* continue to recognize them and re-map them appropriately:
* imap+tls
* imap+ssl
*/
if (scheme.equals("imap")) {
connectionSecurity = ConnectionSecurity.NONE;
port = 143;
} else if (scheme.startsWith("imap+tls")) {
connectionSecurity = ConnectionSecurity.STARTTLS_REQUIRED;
port = 143;
} else if (scheme.startsWith("imap+ssl")) {
connectionSecurity = ConnectionSecurity.SSL_TLS_REQUIRED;
port = 993;
} else {
throw new IllegalArgumentException("Unsupported protocol (" + scheme + ")");
}
host = imapUri.getHost();
if (imapUri.getPort() != -1) {
port = imapUri.getPort();
}
if (imapUri.getUserInfo() != null) {
String userinfo = imapUri.getUserInfo();
String[] userInfoParts = userinfo.split(":");
if (userinfo.endsWith(":")) {
// Password is empty. This can only happen after an account was imported.
authenticationType = AuthType.valueOf(userInfoParts[0]);
username = decodeUtf8(userInfoParts[1]);
} else if (userInfoParts.length == 2) {
authenticationType = AuthType.PLAIN;
username = decodeUtf8(userInfoParts[0]);
password = decodeUtf8(userInfoParts[1]);
} else if (userInfoParts.length == 3) {
authenticationType = AuthType.valueOf(userInfoParts[0]);
username = decodeUtf8(userInfoParts[1]);
if (AuthType.EXTERNAL == authenticationType) {
clientCertificateAlias = decodeUtf8(userInfoParts[2]);
} else {
password = decodeUtf8(userInfoParts[2]);
}
}
}
String path = imapUri.getPath();
if (path != null && path.length() > 1) {
// Strip off the leading "/"
String cleanPath = path.substring(1);
if (cleanPath.length() >= 2 && cleanPath.charAt(1) == '|') {
autoDetectNamespace = cleanPath.charAt(0) == '1';
if (!autoDetectNamespace) {
pathPrefix = cleanPath.substring(2);
}
} else {
if (cleanPath.length() > 0) {
pathPrefix = cleanPath;
autoDetectNamespace = false;
}
}
}
return new ImapStoreSettings(host, port, connectionSecurity, authenticationType, username,
password, clientCertificateAlias, autoDetectNamespace, pathPrefix);
}
/**
* Creates an ImapStore URI with the supplied settings.
*
* @param server
* The {@link ServerSettings} object that holds the server settings.
*
* @return An ImapStore URI that holds the same information as the {@code server} parameter.
*
* @see com.fsck.k9.mail.store.StoreConfig#getStoreUri()
* @see ImapStore#decodeUri(String)
*/
public static String createUri(ServerSettings server) {
String userEnc = encodeUtf8(server.username);
String passwordEnc = (server.password != null) ?
encodeUtf8(server.password) : "";
String clientCertificateAliasEnc = (server.clientCertificateAlias != null) ?
encodeUtf8(server.clientCertificateAlias) : "";
String scheme;
switch (server.connectionSecurity) {
case SSL_TLS_REQUIRED:
scheme = "imap+ssl+";
break;
case STARTTLS_REQUIRED:
scheme = "imap+tls+";
break;
default:
case NONE:
scheme = "imap";
break;
}
AuthType authType = server.authenticationType;
String userInfo;
if (authType == AuthType.EXTERNAL) {
userInfo = authType.name() + ":" + userEnc + ":" + clientCertificateAliasEnc;
} else {
userInfo = authType.name() + ":" + userEnc + ":" + passwordEnc;
}
try {
Map<String, String> extra = server.getExtra();
String path;
if (extra != null) {
boolean autoDetectNamespace = Boolean.TRUE.toString().equals(
extra.get(ImapStoreSettings.AUTODETECT_NAMESPACE_KEY));
String pathPrefix = (autoDetectNamespace) ?
null : extra.get(ImapStoreSettings.PATH_PREFIX_KEY);
path = "/" + (autoDetectNamespace ? "1" : "0") + "|" +
((pathPrefix == null) ? "" : pathPrefix);
} else {
path = "/1|";
}
return new URI(scheme, userInfo, server.host, server.port,
path,
null, null).toString();
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Can't create ImapStore URI", e);
}
}
/**
* This class is used to store the decoded contents of an ImapStore URI.
*
* @see ImapStore#decodeUri(String)
*/
public static class ImapStoreSettings extends ServerSettings {
public static final String AUTODETECT_NAMESPACE_KEY = "autoDetectNamespace";
public static final String PATH_PREFIX_KEY = "pathPrefix";
public final boolean autoDetectNamespace;
public final String pathPrefix;
protected ImapStoreSettings(String host, int port, ConnectionSecurity connectionSecurity,
AuthType authenticationType, String username, String password, String clientCertificateAlias,
boolean autodetectNamespace, String pathPrefix) {
super(STORE_TYPE, host, port, connectionSecurity, authenticationType, username,
password, clientCertificateAlias);
this.autoDetectNamespace = autodetectNamespace;
this.pathPrefix = pathPrefix;
}
@Override
public Map<String, String> getExtra() {
Map<String, String> extra = new HashMap<String, String>();
extra.put(AUTODETECT_NAMESPACE_KEY, Boolean.valueOf(autoDetectNamespace).toString());
putIfNotNull(extra, PATH_PREFIX_KEY, pathPrefix);
return extra;
}
@Override
public ServerSettings newPassword(String newPassword) {
return new ImapStoreSettings(host, port, connectionSecurity, authenticationType,
username, newPassword, clientCertificateAlias, autoDetectNamespace, pathPrefix);
}
}
protected static final SimpleDateFormat RFC3501_DATE = new SimpleDateFormat("dd-MMM-yyyy", Locale.US);
private final Deque<ImapConnection> mConnections =
new LinkedList<ImapConnection>();
/**
* Charset used for converting folder names to and from UTF-7 as defined by RFC 3501.
*/
private Charset mModifiedUtf7Charset;
/**
* Cache of ImapFolder objects. ImapFolders are attached to a given folder on the server
* and as long as their associated connection remains open they are reusable between
* requests. This cache lets us make sure we always reuse, if possible, for a given
* folder name.
*/
private final Map<String, ImapFolder> mFolderCache = new HashMap<String, ImapFolder>();
public ImapStore(StoreConfig storeConfig,
TrustedSocketFactory trustedSocketFactory,
ConnectivityManager connectivityManager)
throws MessagingException {
super(storeConfig, trustedSocketFactory);
ImapStoreSettings settings;
try {
settings = decodeUri(storeConfig.getStoreUri());
} catch (IllegalArgumentException e) {
throw new MessagingException("Error while decoding store URI", e);
}
mHost = settings.host;
mPort = settings.port;
mConnectionSecurity = settings.connectionSecurity;
mConnectivityManager = connectivityManager;
mAuthType = settings.authenticationType;
mUsername = settings.username;
mPassword = settings.password;
mClientCertificateAlias = settings.clientCertificateAlias;
// Make extra sure mPathPrefix is null if "auto-detect namespace" is configured
mPathPrefix = (settings.autoDetectNamespace) ? null : settings.pathPrefix;
mModifiedUtf7Charset = new CharsetProvider().charsetForName("X-RFC-3501");
}
@Override
public Folder getFolder(String name) {
ImapFolder folder;
synchronized (mFolderCache) {
folder = mFolderCache.get(name);
if (folder == null) {
folder = new ImapFolder(this, name);
mFolderCache.put(name, folder);
}
}
return folder;
}
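// Lazily combines the configured path prefix with the server-reported hierarchy delimiter
// (e.g. "INBOX" + "." -> "INBOX."), caching the result in mCombinedPrefix, which is reset
// whenever a new delimiter is learned.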
private String getCombinedPrefix() {
if (mCombinedPrefix == null) {
if (mPathPrefix != null) {
String tmpPrefix = mPathPrefix.trim();
String tmpDelim = (mPathDelimiter != null ? mPathDelimiter.trim() : "");
if (tmpPrefix.endsWith(tmpDelim)) {
mCombinedPrefix = tmpPrefix;
} else if (tmpPrefix.length() > 0) {
mCombinedPrefix = tmpPrefix + tmpDelim;
} else {
mCombinedPrefix = "";
}
} else {
mCombinedPrefix = "";
}
}
return mCombinedPrefix;
}
@Override
public List<? extends Folder> getPersonalNamespaces(boolean forceListAll) throws MessagingException {
ImapConnection connection = getConnection();
try {
List<? extends Folder> allFolders = listFolders(connection, false);
if (forceListAll || !mStoreConfig.subscribedFoldersOnly()) {
return allFolders;
} else {
List<Folder> resultFolders = new LinkedList<Folder>();
Set<String> subscribedFolderNames = new HashSet<String>();
List<? extends Folder> subscribedFolders = listFolders(connection, true);
for (Folder subscribedFolder : subscribedFolders) {
subscribedFolderNames.add(subscribedFolder.getName());
}
for (Folder folder : allFolders) {
if (subscribedFolderNames.contains(folder.getName())) {
resultFolders.add(folder);
}
}
return resultFolders;
}
} catch (IOException ioe) {
connection.close();
throw new MessagingException("Unable to get folder list.", ioe);
} catch (MessagingException me) {
connection.close();
throw new MessagingException("Unable to get folder list.", me);
} finally {
releaseConnection(connection);
}
}
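// Issues LIST (all folders) or LSUB (subscribed folders only) scoped to the combined prefix,
// turning each "* LIST (<attributes>) <delimiter> <name>" response into a folder while skipping
// \NoSelect entries, the server-side copy of the local outbox, and names outside the prefix.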
private List<? extends Folder> listFolders(ImapConnection connection, boolean LSUB) throws IOException, MessagingException {
String commandResponse = LSUB ? "LSUB" : "LIST";
LinkedList<Folder> folders = new LinkedList<Folder>();
List<ImapResponse> responses =
connection.executeSimpleCommand(String.format("%s \"\" %s", commandResponse,
encodeString(getCombinedPrefix() + "*")));
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), commandResponse)) {
boolean includeFolder = true;
if (response.size() > 4 || !(response.getObject(3) instanceof String)) {
Log.w(LOG_TAG, "Skipping incorrectly parsed " + commandResponse +
" reply: " + response);
continue;
}
String decodedFolderName;
try {
decodedFolderName = decodeFolderName(response.getString(3));
} catch (CharacterCodingException e) {
Log.w(LOG_TAG, "Folder name not correctly encoded with the UTF-7 variant " +
"as defined by RFC 3501: " + response.getString(3), e);
//TODO: Use the raw name returned by the server for all commands that require
// a folder name. Use the decoded name only for showing it to the user.
// We currently just skip folders with malformed names.
continue;
}
String folder = decodedFolderName;
if (mPathDelimiter == null) {
mPathDelimiter = response.getString(2);
mCombinedPrefix = null;
}
if (folder.equalsIgnoreCase(mStoreConfig.getInboxFolderName())) {
continue;
} else if (folder.equals(mStoreConfig.getOutboxFolderName())) {
/*
* There is a folder on the server with the same name as our local
* outbox. Until we have a good plan to deal with this situation
* we simply ignore the folder on the server.
*/
continue;
} else {
int prefixLength = getCombinedPrefix().length();
if (prefixLength > 0) {
// Strip prefix from the folder name
if (folder.length() >= prefixLength) {
folder = folder.substring(prefixLength);
}
if (!decodedFolderName.equalsIgnoreCase(getCombinedPrefix() + folder)) {
includeFolder = false;
}
}
}
ImapList attributes = response.getList(1);
for (int i = 0, count = attributes.size(); i < count; i++) {
String attribute = attributes.getString(i);
if (attribute.equalsIgnoreCase("\\NoSelect")) {
includeFolder = false;
}
}
if (includeFolder) {
folders.add(getFolder(folder));
}
}
}
folders.add(getFolder(mStoreConfig.getInboxFolderName()));
return folders;
}
/**
* Attempt to auto-configure folders by attributes if the server advertises that capability.
*
* The parsing here is essentially the same as
* {@link #listFolders(ImapConnection, boolean)}; we should try to consolidate
* this at some point. :(
* @param connection IMAP Connection
* @throws IOException uh oh!
* @throws MessagingException uh oh!
*/
private void autoconfigureFolders(final ImapConnection connection) throws IOException, MessagingException {
String commandResponse;
String commandOptions = "";
if (connection.getCapabilities().contains("XLIST")) {
if (K9MailLib.isDebug()) Log.d(LOG_TAG, "Folder auto-configuration: Using XLIST.");
commandResponse = "XLIST";
} else if (connection.getCapabilities().contains("SPECIAL-USE")) {
if (K9MailLib.isDebug()) Log.d(LOG_TAG, "Folder auto-configuration: Using RFC6154/SPECIAL-USE.");
commandResponse = "LIST";
commandOptions = " (SPECIAL-USE)";
} else {
if (K9MailLib.isDebug()) Log.d(LOG_TAG, "No detected folder auto-configuration methods.");
return;
}
final List<ImapResponse> responses =
connection.executeSimpleCommand(String.format("%s%s \"\" %s", commandResponse, commandOptions,
encodeString(getCombinedPrefix() + "*")));
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), commandResponse)) {
String decodedFolderName;
try {
decodedFolderName = decodeFolderName(response.getString(3));
} catch (CharacterCodingException e) {
Log.w(LOG_TAG, "Folder name not correctly encoded with the UTF-7 variant " +
"as defined by RFC 3501: " + response.getString(3), e);
// We currently just skip folders with malformed names.
continue;
}
if (mPathDelimiter == null) {
mPathDelimiter = response.getString(2);
mCombinedPrefix = null;
}
ImapList attributes = response.getList(1);
for (int i = 0, count = attributes.size(); i < count; i++) {
String attribute = attributes.getString(i);
if (attribute.equals("\\Drafts")) {
mStoreConfig.setDraftsFolderName(decodedFolderName);
if (K9MailLib.isDebug()) Log.d(LOG_TAG, "Folder auto-configuration detected draft folder: " + decodedFolderName);
} else if (attribute.equals("\\Sent")) {
mStoreConfig.setSentFolderName(decodedFolderName);
if (K9MailLib.isDebug()) Log.d(LOG_TAG, "Folder auto-configuration detected sent folder: " + decodedFolderName);
} else if (attribute.equals("\\Spam") || attribute.equals("\\Junk")) {
//rfc6154 just mentions \Junk
mStoreConfig.setSpamFolderName(decodedFolderName);
if (K9MailLib.isDebug()) Log.d(LOG_TAG, "Folder auto-configuration detected spam folder: " + decodedFolderName);
} else if (attribute.equals("\\Trash")) {
mStoreConfig.setTrashFolderName(decodedFolderName);
if (K9MailLib.isDebug()) Log.d(LOG_TAG, "Folder auto-configuration detected trash folder: " + decodedFolderName);
}
}
}
}
}
@Override
public void checkSettings() throws MessagingException {
try {
ImapConnection connection = new ImapConnection(
new StoreImapSettings(),
mTrustedSocketFactory,
mConnectivityManager);
connection.open();
autoconfigureFolders(connection);
connection.close();
} catch (IOException ioe) {
throw new MessagingException("Unable to connect", ioe);
}
}
private ImapConnection getConnection() throws MessagingException {
synchronized (mConnections) {
ImapConnection connection;
while ((connection = mConnections.poll()) != null) {
try {
connection.executeSimpleCommand("NOOP");
break;
} catch (IOException ioe) {
connection.close();
}
}
if (connection == null) {
connection = new ImapConnection(new StoreImapSettings(),
mTrustedSocketFactory,
mConnectivityManager);
}
return connection;
}
}
private void releaseConnection(ImapConnection connection) {
if (connection != null && connection.isOpen()) {
synchronized (mConnections) {
mConnections.offer(connection);
}
}
}
/**
* Encode a string to be able to use it in an IMAP command.
*
* "A quoted string is a sequence of zero or more 7-bit characters,
* excluding CR and LF, with double quote (<">) characters at each
* end." - Section 4.3, RFC 3501
*
* Double quotes and backslash are escaped by prepending a backslash.
*
* @param str
* The input string (only 7-bit characters allowed).
* @return
* The string encoded as quoted (IMAP) string.
*/
private static String encodeString(String str) {
return "\"" + str.replace("\\", "\\\\").replace("\"", "\\\"") + "\"";
}
private String encodeFolderName(String name) {
ByteBuffer bb = mModifiedUtf7Charset.encode(name);
byte[] b = new byte[bb.limit()];
bb.get(b);
return new String(b, Charset.forName("US-ASCII"));
}
private String decodeFolderName(String name) throws CharacterCodingException {
/*
* Convert the encoded name to US-ASCII, then pass it through the modified UTF-7
* decoder and return the Unicode String.
*/
// Make sure the decoder throws an exception if it encounters an invalid encoding.
CharsetDecoder decoder = mModifiedUtf7Charset.newDecoder().onMalformedInput(CodingErrorAction.REPORT);
CharBuffer cb = decoder.decode(ByteBuffer.wrap(name.getBytes(Charset.forName("US-ASCII"))));
return cb.toString();
}
@Override
public boolean isMoveCapable() {
return true;
}
@Override
public boolean isCopyCapable() {
return true;
}
@Override
public boolean isPushCapable() {
return true;
}
@Override
public boolean isExpungeCapable() {
return true;
}
protected class ImapFolder extends Folder<ImapMessage> {
private String mName;
protected volatile int mMessageCount = -1;
protected volatile long uidNext = -1L;
protected volatile ImapConnection mConnection;
private int mMode;
private volatile boolean mExists;
private ImapStore store = null;
Map<Long, String> msgSeqUidMap = new ConcurrentHashMap<Long, String>();
private boolean mInSearch = false;
public ImapFolder(ImapStore nStore, String name) {
super();
store = nStore;
this.mName = name;
}
public String getPrefixedName() throws MessagingException {
String prefixedName = "";
if (!mStoreConfig.getInboxFolderName().equalsIgnoreCase(mName)) {
ImapConnection connection;
synchronized (this) {
if (mConnection == null) {
connection = getConnection();
} else {
connection = mConnection;
}
}
try {
connection.open();
} catch (IOException ioe) {
throw new MessagingException("Unable to get IMAP prefix", ioe);
} finally {
if (mConnection == null) {
releaseConnection(connection);
}
}
prefixedName = getCombinedPrefix();
}
prefixedName += mName;
return prefixedName;
}
protected List<ImapResponse> executeSimpleCommand(String command) throws MessagingException, IOException {
return handleUntaggedResponses(mConnection.executeSimpleCommand(command));
}
protected List<ImapResponse> executeSimpleCommand(String command, boolean sensitive, UntaggedHandler untaggedHandler) throws MessagingException, IOException {
return handleUntaggedResponses(mConnection.executeSimpleCommand(command, sensitive, untaggedHandler));
}
@Override
public void open(int mode) throws MessagingException {
internalOpen(mode);
if (mMessageCount == -1) {
throw new MessagingException(
"Did not find message count during open");
}
}
public List<ImapResponse> internalOpen(int mode) throws MessagingException {
if (isOpen() && mMode == mode) {
// Make sure the connection is valid. If it's not we'll close it down and continue
// on to get a new one.
try {
return executeSimpleCommand("NOOP");
} catch (IOException ioe) {
/* don't throw */ ioExceptionHandler(mConnection, ioe);
}
}
releaseConnection(mConnection);
synchronized (this) {
mConnection = getConnection();
}
// * FLAGS (\Answered \Flagged \Deleted \Seen \Draft NonJunk
// $MDNSent)
// * OK [PERMANENTFLAGS (\Answered \Flagged \Deleted \Seen \Draft
// NonJunk $MDNSent \*)] Flags permitted.
// * 23 EXISTS
// * 0 RECENT
// * OK [UIDVALIDITY 1125022061] UIDs valid
// * OK [UIDNEXT 57576] Predicted next UID
// 2 OK [READ-WRITE] Select completed.
try {
msgSeqUidMap.clear();
String command = String.format("%s %s", mode == OPEN_MODE_RW ? "SELECT"
: "EXAMINE", encodeString(encodeFolderName(getPrefixedName())));
List<ImapResponse> responses = executeSimpleCommand(command);
/*
* If the command succeeds we expect the folder has been opened read-write unless we
* are notified otherwise in the responses.
*/
mMode = mode;
for (ImapResponse response : responses) {
if (response.size() >= 2) {
Object bracketedObj = response.get(1);
if (!(bracketedObj instanceof ImapList)) {
continue;
}
ImapList bracketed = (ImapList) bracketedObj;
if (bracketed.isEmpty()) {
continue;
}
ImapList flags = bracketed.getKeyedList("PERMANENTFLAGS");
if (flags != null) {
// parse: * OK [PERMANENTFLAGS (\Answered \Flagged \Deleted
// \Seen \Draft NonJunk $label1 \*)] Flags permitted.
parseFlags(flags);
} else {
Object keyObj = bracketed.get(0);
if (keyObj instanceof String) {
String key = (String) keyObj;
if (response.getTag() != null) {
if ("READ-ONLY".equalsIgnoreCase(key)) {
mMode = OPEN_MODE_RO;
} else if ("READ-WRITE".equalsIgnoreCase(key)) {
mMode = OPEN_MODE_RW;
}
}
}
}
}
}
mExists = true;
return responses;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
} catch (MessagingException me) {
Log.e(LOG_TAG, "Unable to open connection for " + getLogId(), me);
throw me;
}
}
/**
* Parses a string like PERMANENTFLAGS (\Answered \Flagged \Deleted \Seen \Draft NonJunk
* $label1 \*)
*
* The parsed flags are stored in mPermanentFlagsIndex.
* @param flags
* the IMAP flags as strings
*/
private void parseFlags(ImapList flags) {
for (Object flag : flags) {
flag = flag.toString().toLowerCase(Locale.US);
if (flag.equals("\\deleted")) {
mPermanentFlagsIndex.add(Flag.DELETED);
} else if (flag.equals("\\answered")) {
mPermanentFlagsIndex.add(Flag.ANSWERED);
} else if (flag.equals("\\seen")) {
mPermanentFlagsIndex.add(Flag.SEEN);
} else if (flag.equals("\\flagged")) {
mPermanentFlagsIndex.add(Flag.FLAGGED);
} else if (flag.equals("$forwarded")) {
mPermanentFlagsIndex.add(Flag.FORWARDED);
} else if (flag.equals("\\*")) {
mCanCreateKeywords = true;
}
}
}
@Override
public boolean isOpen() {
return mConnection != null;
}
@Override
public int getMode() {
return mMode;
}
@Override
public void close() {
if (mMessageCount != -1) {
mMessageCount = -1;
}
if (!isOpen()) {
return;
}
synchronized (this) {
// If we are mid-search and we get a close request, we gotta trash the connection.
if (mInSearch && mConnection != null) {
Log.i(LOG_TAG, "IMAP search was aborted, shutting down connection.");
mConnection.close();
} else {
releaseConnection(mConnection);
}
mConnection = null;
}
}
@Override
public String getName() {
return mName;
}
/**
* Check if a given folder exists on the server.
*
* @param folderName
* The name of the folder encoded as quoted string.
* See {@link ImapStore#encodeString}
*
* @return
* {@code True}, if the folder exists. {@code False}, otherwise.
*/
private boolean exists(String folderName) throws MessagingException {
try {
// We're checking a folder other than the currently selected one, so we use STATUS to avoid
// untagged responses changing our own fields; RECENT is requested only because STATUS needs
// some item and we don't care about its value.
mConnection.executeSimpleCommand(String.format("STATUS %s (RECENT)", folderName));
return true;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
} catch (ImapException ie) {
// We got a response, but it was not "OK"
return false;
}
}
@Override
public boolean exists() throws MessagingException {
if (mExists) {
return true;
}
/*
* This method needs to operate in the unselected mode as well as the selected mode
* so we must get the connection ourselves if it's not there. We are specifically
* not calling checkOpen() since we don't care if the folder is open.
*/
ImapConnection connection;
synchronized (this) {
if (mConnection == null) {
connection = getConnection();
} else {
connection = mConnection;
}
}
try {
connection.executeSimpleCommand(String.format("STATUS %s (UIDVALIDITY)",
encodeString(encodeFolderName(getPrefixedName()))));
mExists = true;
return true;
} catch (ImapException ie) {
// We got a response, but it was not "OK"
return false;
} catch (IOException ioe) {
throw ioExceptionHandler(connection, ioe);
} finally {
if (mConnection == null) {
releaseConnection(connection);
}
}
}
@Override
public boolean create(FolderType type) throws MessagingException {
/*
* This method needs to operate in the unselected mode as well as the selected mode
* so we must get the connection ourselves if it's not there. We are specifically
* not calling checkOpen() since we don't care if the folder is open.
*/
ImapConnection connection;
synchronized (this) {
if (mConnection == null) {
connection = getConnection();
} else {
connection = mConnection;
}
}
try {
connection.executeSimpleCommand(String.format("CREATE %s",
encodeString(encodeFolderName(getPrefixedName()))));
return true;
} catch (ImapException ie) {
// We got a response, but it was not "OK"
return false;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
} finally {
if (mConnection == null) {
releaseConnection(connection);
}
}
}
/**
* Copies the given messages to the specified folder.
*
* <p>
* <strong>Note:</strong>
* Only the UIDs of the given {@link Message} instances are used. It is assumed that all
* UIDs represent valid messages in this folder.
* </p>
*
* @param messages
* The messages to copy to the specified folder.
* @param folder
* The name of the target folder.
*
* @return The mapping of original message UIDs to the new server UIDs.
*/
@Override
public Map<String, String> copyMessages(List<? extends Message> messages, Folder folder)
throws MessagingException {
if (!(folder instanceof ImapFolder)) {
throw new MessagingException("ImapFolder.copyMessages passed non-ImapFolder");
}
if (messages.isEmpty()) {
return null;
}
ImapFolder iFolder = (ImapFolder)folder;
checkOpen(); //only need READ access
String[] uids = new String[messages.size()];
for (int i = 0, count = messages.size(); i < count; i++) {
uids[i] = messages.get(i).getUid();
}
try {
String remoteDestName = encodeString(encodeFolderName(iFolder.getPrefixedName()));
//TODO: Try to copy/move the messages first and only create the folder if the
// operation fails. This will save a roundtrip if the folder already exists.
if (!exists(remoteDestName)) {
/*
* If the remote folder doesn't exist we try to create it.
*/
if (K9MailLib.isDebug()) {
Log.i(LOG_TAG, "ImapFolder.copyMessages: attempting to create remote " +
"folder '" + remoteDestName + "' for " + getLogId());
}
iFolder.create(FolderType.HOLDS_MESSAGES);
}
//TODO: Split this into multiple commands if the command exceeds a certain length.
List<ImapResponse> responses = executeSimpleCommand(String.format("UID COPY %s %s",
combine(uids, ','),
remoteDestName));
// Get the tagged response for the UID COPY command
ImapResponse response = responses.get(responses.size() - 1);
Map<String, String> uidMap = null;
if (response.size() > 1) {
/*
* If the server supports UIDPLUS, then along with the COPY response it will
* return a COPYUID response code, e.g.
*
* 24 OK [COPYUID 38505 304,319:320 3956:3958] Success
*
* COPYUID is followed by UIDVALIDITY, the set of UIDs of copied messages from
* the source folder and the set of corresponding UIDs assigned to them in the
* destination folder.
*
* We can use the new UIDs included in this response to update our records.
*/
Object responseList = response.get(1);
if (responseList instanceof ImapList) {
final ImapList copyList = (ImapList) responseList;
if (copyList.size() >= 4 && copyList.getString(0).equals("COPYUID")) {
List<String> srcUids = ImapUtility.getImapSequenceValues(
copyList.getString(2));
List<String> destUids = ImapUtility.getImapSequenceValues(
copyList.getString(3));
if (srcUids != null && destUids != null) {
if (srcUids.size() == destUids.size()) {
Iterator<String> srcUidsIterator = srcUids.iterator();
Iterator<String> destUidsIterator = destUids.iterator();
uidMap = new HashMap<String, String>();
while (srcUidsIterator.hasNext() &&
destUidsIterator.hasNext()) {
String srcUid = srcUidsIterator.next();
String destUid = destUidsIterator.next();
uidMap.put(srcUid, destUid);
}
} else {
if (K9MailLib.isDebug()) {
Log.v(LOG_TAG, "Parse error: size of source UIDs " +
"list is not the same as size of destination " +
"UIDs list.");
}
}
} else {
if (K9MailLib.isDebug()) {
Log.v(LOG_TAG, "Parsing of the sequence set failed.");
}
}
}
}
}
return uidMap;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
@Override
public Map<String, String> moveMessages(List<? extends Message> messages, Folder folder) throws MessagingException {
if (messages.isEmpty())
return null;
Map<String, String> uidMap = copyMessages(messages, folder);
setFlags(messages, Collections.singleton(Flag.DELETED), true);
return uidMap;
}
@Override
public void delete(List<? extends Message> messages, String trashFolderName) throws MessagingException {
if (messages.isEmpty())
return;
if (trashFolderName == null || getName().equalsIgnoreCase(trashFolderName)) {
setFlags(messages, Collections.singleton(Flag.DELETED), true);
} else {
ImapFolder remoteTrashFolder = (ImapFolder)getStore().getFolder(trashFolderName);
String remoteTrashName = encodeString(encodeFolderName(remoteTrashFolder.getPrefixedName()));
if (!exists(remoteTrashName)) {
/*
* If the remote trash folder doesn't exist we try to create it.
*/
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "IMAPMessage.delete: attempting to create remote '" + trashFolderName + "' folder for " + getLogId());
remoteTrashFolder.create(FolderType.HOLDS_MESSAGES);
}
if (exists(remoteTrashName)) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "IMAPMessage.delete: copying remote " + messages.size() + " messages to '" + trashFolderName + "' for " + getLogId());
moveMessages(messages, remoteTrashFolder);
} else {
throw new MessagingException("IMAPMessage.delete: remote Trash folder " + trashFolderName + " does not exist and could not be created for " + getLogId()
, true);
}
}
}
@Override
public int getMessageCount() {
return mMessageCount;
}
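// Counts messages matching the given SEARCH criteria; every untagged SEARCH response lists
// one message sequence number per hit.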
private int getRemoteMessageCount(String criteria) throws MessagingException {
checkOpen(); //only need READ access
try {
int count = 0;
int start = 1;
List<ImapResponse> responses = executeSimpleCommand(String.format(Locale.US, "SEARCH %d:* %s", start, criteria));
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "SEARCH")) {
count += response.size() - 1;
}
}
return count;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
@Override
public int getUnreadMessageCount() throws MessagingException {
return getRemoteMessageCount("UNSEEN NOT DELETED");
}
@Override
public int getFlaggedMessageCount() throws MessagingException {
return getRemoteMessageCount("FLAGGED NOT DELETED");
}
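// "UID SEARCH *:*" matches only the message with the highest UID in the mailbox; since
// search() returns results sorted by UID in descending order, the first entry is that UID.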
protected long getHighestUid() {
try {
ImapSearcher searcher = new ImapSearcher() {
@Override
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand("UID SEARCH *:*");
}
};
List<? extends Message> messages = search(searcher, null);
if (messages.size() > 0) {
return Long.parseLong(messages.get(0).getUid());
}
} catch (Exception e) {
Log.e(LOG_TAG, "Unable to find highest UID in folder " + getName(), e);
}
return -1L;
}
@Override
public void delete(boolean recurse) throws MessagingException {
throw new Error("ImapStore.delete() not yet implemented");
}
@Override
public ImapMessage getMessage(String uid) throws MessagingException {
return new ImapMessage(uid, this);
}
@Override
public List<ImapMessage> getMessages(int start, int end, Date earliestDate, MessageRetrievalListener<ImapMessage> listener)
throws MessagingException {
return getMessages(start, end, earliestDate, false, listener);
}
protected List<ImapMessage> getMessages(final int start, final int end, Date earliestDate, final boolean includeDeleted, final MessageRetrievalListener<ImapMessage> listener)
throws MessagingException {
if (start < 1 || end < 1 || end < start) {
throw new MessagingException(
String.format(Locale.US, "Invalid message set %d %d",
start, end));
}
final StringBuilder dateSearchString = new StringBuilder();
if (earliestDate != null) {
dateSearchString.append(" SINCE ");
synchronized (RFC3501_DATE) {
dateSearchString.append(RFC3501_DATE.format(earliestDate));
}
}
ImapSearcher searcher = new ImapSearcher() {
@Override
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand(String.format(Locale.US, "UID SEARCH %d:%d%s%s", start, end, dateSearchString, includeDeleted ? "" : " NOT DELETED"));
}
};
return search(searcher, listener);
}
protected List<ImapMessage> getMessages(final List<Long> mesgSeqs,
final boolean includeDeleted,
final MessageRetrievalListener<ImapMessage> listener)
throws MessagingException {
ImapSearcher searcher = new ImapSearcher() {
@Override
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand(String.format("UID SEARCH %s%s", combine(mesgSeqs.toArray(), ','), includeDeleted ? "" : " NOT DELETED"));
}
};
return search(searcher, listener);
}
protected List<? extends Message> getMessagesFromUids(final List<String> mesgUids,
final boolean includeDeleted,
final MessageRetrievalListener<ImapMessage> listener) throws MessagingException {
ImapSearcher searcher = new ImapSearcher() {
@Override
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand(String.format("UID SEARCH UID %s%s", combine(mesgUids.toArray(), ','), includeDeleted ? "" : " NOT DELETED"));
}
};
return search(searcher, listener);
}
protected List<ImapMessage> search(ImapSearcher searcher, MessageRetrievalListener<ImapMessage> listener) throws MessagingException {
checkOpen(); //only need READ access
List<ImapMessage> messages = new ArrayList<ImapMessage>();
try {
List<Long> uids = new ArrayList<Long>();
List<ImapResponse> responses = searcher.search(); //
for (ImapResponse response : responses) {
if (response.getTag() == null) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "SEARCH")) {
for (int i = 1, count = response.size(); i < count; i++) {
uids.add(response.getLong(i));
}
}
}
}
// Sort the uids in numerically decreasing order
// By doing it in decreasing order, we ensure newest messages are dealt with first
// This makes the most sense when a limit is imposed, and also prevents UI from going
// crazy adding stuff at the top.
Collections.sort(uids, Collections.reverseOrder());
for (int i = 0, count = uids.size(); i < count; i++) {
String uid = uids.get(i).toString();
if (listener != null) {
listener.messageStarted(uid, i, count);
}
ImapMessage message = new ImapMessage(uid, this);
messages.add(message);
if (listener != null) {
listener.messageFinished(message, i, count);
}
}
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
return messages;
}
@Override
public List<ImapMessage> getMessages(MessageRetrievalListener<ImapMessage> listener) throws MessagingException {
return getMessages(null, listener);
}
@Override
public List<ImapMessage> getMessages(String[] uids, MessageRetrievalListener<ImapMessage> listener)
throws MessagingException {
checkOpen(); //only need READ access
List<ImapMessage> messages = new ArrayList<ImapMessage>();
try {
if (uids == null) {
List<ImapResponse> responses = executeSimpleCommand("UID SEARCH 1:* NOT DELETED");
List<String> tempUids = new ArrayList<String>();
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "SEARCH")) {
for (int i = 1, count = response.size(); i < count; i++) {
tempUids.add(response.getString(i));
}
}
}
uids = tempUids.toArray(EMPTY_STRING_ARRAY);
}
for (int i = 0, count = uids.length; i < count; i++) {
if (listener != null) {
listener.messageStarted(uids[i], i, count);
}
ImapMessage message = new ImapMessage(uids[i], this);
messages.add(message);
if (listener != null) {
listener.messageFinished(message, i, count);
}
}
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
return messages;
}
@Override
public void fetch(List<ImapMessage> messages, FetchProfile fp, MessageRetrievalListener<ImapMessage> listener)
throws MessagingException {
if (messages == null || messages.isEmpty()) {
return;
}
checkOpen(); //only need READ access
List<String> uids = new ArrayList<String>(messages.size());
HashMap<String, Message> messageMap = new HashMap<String, Message>();
for (Message msg : messages) {
String uid = msg.getUid();
uids.add(uid);
messageMap.put(uid, msg);
}
/*
* Figure out what command we are going to run:
* Flags - UID FETCH (FLAGS)
* Envelope - UID FETCH ([FLAGS] INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[HEADER.FIELDS (date subject from content-type to cc)])
*
*/
Set<String> fetchFields = new LinkedHashSet<String>();
fetchFields.add("UID");
if (fp.contains(FetchProfile.Item.FLAGS)) {
fetchFields.add("FLAGS");
}
if (fp.contains(FetchProfile.Item.ENVELOPE)) {
fetchFields.add("INTERNALDATE");
fetchFields.add("RFC822.SIZE");
fetchFields.add("BODY.PEEK[HEADER.FIELDS (date subject from content-type to cc " +
"reply-to message-id references in-reply-to " + K9MailLib.IDENTITY_HEADER + ")]");
}
if (fp.contains(FetchProfile.Item.STRUCTURE)) {
fetchFields.add("BODYSTRUCTURE");
}
if (fp.contains(FetchProfile.Item.BODY_SANE)) {
// If the user wants to download unlimited-size messages, don't go only for the truncated body
if (mStoreConfig.getMaximumAutoDownloadMessageSize() > 0) {
fetchFields.add(String.format(Locale.US, "BODY.PEEK[]<0.%d>", mStoreConfig.getMaximumAutoDownloadMessageSize()));
} else {
fetchFields.add("BODY.PEEK[]");
}
}
if (fp.contains(FetchProfile.Item.BODY)) {
fetchFields.add("BODY.PEEK[]");
}
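// Fetch in windows of FETCH_WINDOW_SIZE UIDs so a single FETCH command line doesn't grow
// unbounded for large message lists.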
for (int windowStart = 0; windowStart < messages.size(); windowStart += (FETCH_WINDOW_SIZE)) {
List<String> uidWindow = uids.subList(windowStart, Math.min((windowStart + FETCH_WINDOW_SIZE), messages.size()));
try {
mConnection.sendCommand(String.format("UID FETCH %s (%s)",
combine(uidWindow.toArray(new String[uidWindow.size()]), ','),
combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')
), false);
ImapResponse response;
int messageNumber = 0;
ImapResponseCallback callback = null;
if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
callback = new FetchBodyCallback(messageMap);
}
do {
response = mConnection.readResponse(callback);
if (response.getTag() == null && ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH")) {
ImapList fetchList = (ImapList)response.getKeyedValue("FETCH");
String uid = fetchList.getKeyedString("UID");
long msgSeq = response.getLong(0);
if (uid != null) {
try {
msgSeqUidMap.put(msgSeq, uid);
if (K9MailLib.isDebug()) {
Log.v(LOG_TAG, "Stored uid '" + uid + "' for msgSeq " + msgSeq + " into map " /*+ msgSeqUidMap.toString() */);
}
} catch (Exception e) {
Log.e(LOG_TAG, "Unable to store uid '" + uid + "' for msgSeq " + msgSeq);
}
}
Message message = messageMap.get(uid);
if (message == null) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Do not have message in messageMap for UID " + uid + " for " + getLogId());
handleUntaggedResponse(response);
continue;
}
if (listener != null) {
listener.messageStarted(uid, messageNumber++, messageMap.size());
}
ImapMessage imapMessage = (ImapMessage) message;
Object literal = handleFetchResponse(imapMessage, fetchList);
if (literal != null) {
if (literal instanceof String) {
String bodyString = (String)literal;
InputStream bodyStream = new ByteArrayInputStream(bodyString.getBytes());
imapMessage.parse(bodyStream);
} else if (literal instanceof Integer) {
// All the work was done in FetchBodyCallback.foundLiteral()
} else {
// This shouldn't happen
throw new MessagingException("Got FETCH response with bogus parameters");
}
}
if (listener != null) {
listener.messageFinished(imapMessage, messageNumber, messageMap.size());
}
} else {
handleUntaggedResponse(response);
}
} while (response.getTag() == null);
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
}
@Override
public void fetchPart(Message message, Part part, MessageRetrievalListener<Message> listener)
throws MessagingException {
checkOpen(); //only need READ access
String[] parts = part.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
if (parts == null) {
return;
}
String fetch;
String partId = parts[0];
if ("TEXT".equalsIgnoreCase(partId)) {
fetch = String.format(Locale.US, "BODY.PEEK[TEXT]<0.%d>",
mStoreConfig.getMaximumAutoDownloadMessageSize());
} else {
fetch = String.format("BODY.PEEK[%s]", partId);
}
try {
mConnection.sendCommand(
String.format("UID FETCH %s (UID %s)", message.getUid(), fetch),
false);
ImapResponse response;
int messageNumber = 0;
ImapResponseCallback callback = new FetchPartCallback(part);
do {
response = mConnection.readResponse(callback);
if ((response.getTag() == null) &&
(ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH"))) {
ImapList fetchList = (ImapList)response.getKeyedValue("FETCH");
String uid = fetchList.getKeyedString("UID");
if (!message.getUid().equals(uid)) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Did not ask for UID " + uid + " for " + getLogId());
handleUntaggedResponse(response);
continue;
}
if (listener != null) {
listener.messageStarted(uid, messageNumber++, 1);
}
ImapMessage imapMessage = (ImapMessage) message;
Object literal = handleFetchResponse(imapMessage, fetchList);
if (literal != null) {
if (literal instanceof Body) {
// Most of the work was done in FetchPartCallback.foundLiteral()
MimeMessageHelper.setBody(part, (Body) literal);
} else if (literal instanceof String) {
String bodyString = (String)literal;
InputStream bodyStream = new ByteArrayInputStream(bodyString.getBytes());
String contentTransferEncoding = part
.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING)[0];
String contentType = part
.getHeader(MimeHeader.HEADER_CONTENT_TYPE)[0];
MimeMessageHelper.setBody(part, MimeUtility.createBody(bodyStream,
contentTransferEncoding, contentType));
} else {
// This shouldn't happen
throw new MessagingException("Got FETCH response with bogus parameters");
}
}
if (listener != null) {
listener.messageFinished(message, messageNumber, 1);
}
} else {
handleUntaggedResponse(response);
}
} while (response.getTag() == null);
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
// Returns value of body field
private Object handleFetchResponse(ImapMessage message, ImapList fetchList) throws MessagingException {
Object result = null;
if (fetchList.containsKey("FLAGS")) {
ImapList flags = fetchList.getKeyedList("FLAGS");
if (flags != null) {
for (int i = 0, count = flags.size(); i < count; i++) {
String flag = flags.getString(i);
if (flag.equalsIgnoreCase("\\Deleted")) {
message.setFlagInternal(Flag.DELETED, true);
} else if (flag.equalsIgnoreCase("\\Answered")) {
message.setFlagInternal(Flag.ANSWERED, true);
} else if (flag.equalsIgnoreCase("\\Seen")) {
message.setFlagInternal(Flag.SEEN, true);
} else if (flag.equalsIgnoreCase("\\Flagged")) {
message.setFlagInternal(Flag.FLAGGED, true);
} else if (flag.equalsIgnoreCase("$Forwarded")) {
message.setFlagInternal(Flag.FORWARDED, true);
/* The message carries the $Forwarded keyword, so the server supports it and we can set it too */
mPermanentFlagsIndex.add(Flag.FORWARDED);
}
}
}
}
if (fetchList.containsKey("INTERNALDATE")) {
Date internalDate = fetchList.getKeyedDate("INTERNALDATE");
message.setInternalDate(internalDate);
}
if (fetchList.containsKey("RFC822.SIZE")) {
int size = fetchList.getKeyedNumber("RFC822.SIZE");
message.setSize(size);
}
if (fetchList.containsKey("BODYSTRUCTURE")) {
ImapList bs = fetchList.getKeyedList("BODYSTRUCTURE");
if (bs != null) {
try {
parseBodyStructure(bs, message, "TEXT");
} catch (MessagingException e) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Error handling message for " + getLogId(), e);
message.setBody(null);
}
}
}
if (fetchList.containsKey("BODY")) {
int index = fetchList.getKeyIndex("BODY") + 2;
int size = fetchList.size();
if (index < size) {
result = fetchList.getObject(index);
// Check if there's an origin octet
if (result instanceof String) {
String originOctet = (String) result;
if (originOctet.startsWith("<") && (index + 1) < size) {
result = fetchList.getObject(index + 1);
}
}
}
}
return result;
}
/**
* Handle any untagged responses that the caller doesn't care to handle themselves.
*/
protected List<ImapResponse> handleUntaggedResponses(List<ImapResponse> responses) {
for (ImapResponse response : responses) {
handleUntaggedResponse(response);
}
return responses;
}
protected void handlePossibleUidNext(ImapResponse response) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "OK") && response.size() > 1) {
Object bracketedObj = response.get(1);
if (bracketedObj instanceof ImapList) {
ImapList bracketed = (ImapList)bracketedObj;
if (bracketed.size() > 1) {
Object keyObj = bracketed.get(0);
if (keyObj instanceof String) {
String key = (String)keyObj;
if ("UIDNEXT".equalsIgnoreCase(key)) {
uidNext = bracketed.getLong(1);
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Got UidNext = " + uidNext + " for " + getLogId());
}
}
}
}
}
}
/**
* Handle an untagged response that the caller doesn't care to handle themselves.
*/
protected void handleUntaggedResponse(ImapResponse response) {
if (response.getTag() == null && response.size() > 1) {
if (ImapResponseParser.equalsIgnoreCase(response.get(1), "EXISTS")) {
mMessageCount = response.getNumber(0);
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Got untagged EXISTS with value " + mMessageCount + " for " + getLogId());
}
handlePossibleUidNext(response);
if (ImapResponseParser.equalsIgnoreCase(response.get(1), "EXPUNGE") && mMessageCount > 0) {
mMessageCount--;
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Got untagged EXPUNGE with mMessageCount " + mMessageCount + " for " + getLogId());
}
// if (response.size() > 1) {
// Object bracketedObj = response.get(1);
// if (bracketedObj instanceof ImapList)
// {
// ImapList bracketed = (ImapList)bracketedObj;
//
// if (!bracketed.isEmpty())
// {
// Object keyObj = bracketed.get(0);
// if (keyObj instanceof String)
// {
// String key = (String)keyObj;
// if ("ALERT".equalsIgnoreCase(key))
// {
// StringBuilder sb = new StringBuilder();
// for (int i = 2, count = response.size(); i < count; i++) {
// sb.append(response.get(i).toString());
// sb.append(' ');
// }
//
// Log.w(LOG_TAG, "ALERT: " + sb.toString() + " for " + getLogId());
// }
// }
// }
//
//
// }
// }
}
//Log.i(LOG_TAG, "mMessageCount = " + mMessageCount + " for " + getLogId());
}
private void parseBodyStructure(ImapList bs, Part part, String id)
throws MessagingException {
if (bs.get(0) instanceof ImapList) {
/*
* This is a multipart/*
*/
MimeMultipart mp = new MimeMultipart();
for (int i = 0, count = bs.size(); i < count; i++) {
if (bs.get(i) instanceof ImapList) {
/*
* For each part in the message we're going to add a new BodyPart and parse
* into it.
*/
MimeBodyPart bp = new MimeBodyPart();
if (id.equalsIgnoreCase("TEXT")) {
parseBodyStructure(bs.getList(i), bp, Integer.toString(i + 1));
} else {
parseBodyStructure(bs.getList(i), bp, id + "." + (i + 1));
}
mp.addBodyPart(bp);
} else {
/*
* We've got to the end of the children of the part, so now we can find out
* what type it is and bail out.
*/
String subType = bs.getString(i);
mp.setSubType(subType.toLowerCase(Locale.US));
break;
}
}
part.setBody(mp);
} else {
/*
* This is a body. We need to add as much information as we can find out about
* it to the Part.
*/
/*
* 0| 0 body type
* 1| 1 body subtype
* 2| 2 body parameter parenthesized list
* 3| 3 body id (unused)
* 4| 4 body description (unused)
* 5| 5 body encoding
* 6| 6 body size
* -| 7 text lines (only for type TEXT, unused)
* Extensions (optional):
* 7| 8 body MD5 (unused)
* 8| 9 body disposition
* 9|10 body language (unused)
* 10|11 body location (unused)
*/
String type = bs.getString(0);
String subType = bs.getString(1);
String mimeType = (type + "/" + subType).toLowerCase(Locale.US);
ImapList bodyParams = null;
if (bs.get(2) instanceof ImapList) {
bodyParams = bs.getList(2);
}
String encoding = bs.getString(5);
int size = bs.getNumber(6);
if (MimeUtility.mimeTypeMatches(mimeType, "message/rfc822")) {
// A body type of type MESSAGE and subtype RFC822
// contains, immediately after the basic fields, the
// envelope structure, body structure, and size in
// text lines of the encapsulated message.
// [MESSAGE, RFC822, [NAME, Fwd: [#HTR-517941]: update plans at 1am Friday - Memory allocation - displayware.eml], NIL, NIL, 7BIT, 5974, NIL, [INLINE, [FILENAME*0, Fwd: [#HTR-517941]: update plans at 1am Friday - Memory all, FILENAME*1, ocation - displayware.eml]], NIL]
/*
* This will be caught by fetch and handled appropriately.
*/
throw new MessagingException("BODYSTRUCTURE message/rfc822 not yet supported.");
}
/*
* Set the content type with as much information as we know right now.
*/
StringBuilder contentType = new StringBuilder();
contentType.append(mimeType);
if (bodyParams != null) {
/*
* If there are body params we might be able to get some more information out
* of them.
*/
for (int i = 0, count = bodyParams.size(); i < count; i += 2) {
contentType.append(String.format(";\r\n %s=\"%s\"",
bodyParams.getString(i),
bodyParams.getString(i + 1)));
}
}
part.setHeader(MimeHeader.HEADER_CONTENT_TYPE, contentType.toString());
// Extension items
ImapList bodyDisposition = null;
if (("text".equalsIgnoreCase(type))
&& (bs.size() > 9)
&& (bs.get(9) instanceof ImapList)) {
bodyDisposition = bs.getList(9);
} else if (!("text".equalsIgnoreCase(type))
&& (bs.size() > 8)
&& (bs.get(8) instanceof ImapList)) {
bodyDisposition = bs.getList(8);
}
StringBuilder contentDisposition = new StringBuilder();
if (bodyDisposition != null && !bodyDisposition.isEmpty()) {
if (!"NIL".equalsIgnoreCase(bodyDisposition.getString(0))) {
contentDisposition.append(bodyDisposition.getString(0).toLowerCase(Locale.US));
}
if ((bodyDisposition.size() > 1)
&& (bodyDisposition.get(1) instanceof ImapList)) {
ImapList bodyDispositionParams = bodyDisposition.getList(1);
/*
* If there is body disposition information we can pull some more information
* about the attachment out.
*/
for (int i = 0, count = bodyDispositionParams.size(); i < count; i += 2) {
contentDisposition.append(String.format(";\r\n %s=\"%s\"",
bodyDispositionParams.getString(i).toLowerCase(Locale.US),
bodyDispositionParams.getString(i + 1)));
}
}
}
if (MimeUtility.getHeaderParameter(contentDisposition.toString(), "size") == null) {
contentDisposition.append(String.format(Locale.US, ";\r\n size=%d", size));
}
/*
* Set the content disposition containing at least the size. Attachment
* handling code will use this down the road.
*/
part.setHeader(MimeHeader.HEADER_CONTENT_DISPOSITION, contentDisposition.toString());
/*
* Set the Content-Transfer-Encoding header. Attachment code will use this
* to parse the body.
*/
part.setHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING, encoding);
if (part instanceof ImapMessage) {
((ImapMessage) part).setSize(size);
}
part.setHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA, id);
}
}
/**
* Appends the given messages to the selected folder.
*
* <p>
* This implementation also determines the new UIDs of the given messages on the IMAP
* server and changes the messages' UIDs to the new server UIDs.
* </p>
*
* @param messages
* The messages to append to the folder.
*
* @return The mapping of original message UIDs to the new server UIDs.
*/
@Override
public Map<String, String> appendMessages(List<? extends Message> messages) throws MessagingException {
open(OPEN_MODE_RW);
checkOpen();
try {
Map<String, String> uidMap = new HashMap<String, String>();
for (Message message : messages) {
mConnection.sendCommand(
String.format(Locale.US, "APPEND %s (%s) {%d}",
encodeString(encodeFolderName(getPrefixedName())),
combineFlags(message.getFlags()),
message.calculateSize()), false);
ImapResponse response;
do {
response = mConnection.readResponse();
handleUntaggedResponse(response);
if (response.isContinuationRequested()) {
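// The server answered with a "+" continuation request; stream the message literal
// followed by the terminating CRLF.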
EOLConvertingOutputStream eolOut = new EOLConvertingOutputStream(mConnection.getOutputStream());
message.writeTo(eolOut);
eolOut.write('\r');
eolOut.write('\n');
eolOut.flush();
}
} while (response.getTag() == null);
if (response.size() > 1) {
/*
* If the server supports UIDPLUS, then along with the APPEND response it
* will return an APPENDUID response code, e.g.
*
* 11 OK [APPENDUID 2 238268] APPEND completed
*
* We can use the UID included in this response to update our records.
*/
Object responseList = response.get(1);
if (responseList instanceof ImapList) {
ImapList appendList = (ImapList) responseList;
if (appendList.size() >= 3 &&
appendList.getString(0).equals("APPENDUID")) {
String newUid = appendList.getString(2);
if (!TextUtils.isEmpty(newUid)) {
message.setUid(newUid);
uidMap.put(message.getUid(), newUid);
continue;
}
}
}
}
/*
* This part is executed in case the server does not support UIDPLUS or does
* not implement the APPENDUID response code.
*/
String newUid = getUidFromMessageId(message);
if (K9MailLib.isDebug()) {
Log.d(LOG_TAG, "Got UID " + newUid + " for message for " + getLogId());
}
if (!TextUtils.isEmpty(newUid)) {
uidMap.put(message.getUid(), newUid);
message.setUid(newUid);
}
}
/*
* We need uidMap to be null if new UIDs are not available to maintain consistency
* with the behavior of other similar methods (copyMessages, moveMessages) which
* return null.
*/
return (uidMap.isEmpty()) ? null : uidMap;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
@Override
public String getUidFromMessageId(Message message) throws MessagingException {
try {
/*
* Try to find the UID of the message we just appended using the
* Message-ID header.
*/
String[] messageIdHeader = message.getHeader("Message-ID");
if (messageIdHeader == null || messageIdHeader.length == 0) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Did not get a message-id in order to search for UID for " + getLogId());
return null;
}
String messageId = messageIdHeader[0];
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Looking for UID for message with message-id " + messageId + " for " + getLogId());
List<ImapResponse> responses =
executeSimpleCommand(
String.format("UID SEARCH HEADER MESSAGE-ID %s", encodeString(messageId)));
for (ImapResponse response1 : responses) {
if (response1.getTag() == null && ImapResponseParser.equalsIgnoreCase(response1.get(0), "SEARCH")
&& response1.size() > 1) {
return response1.getString(1);
}
}
return null;
} catch (IOException ioe) {
throw new MessagingException("Could not find UID for message based on Message-ID", ioe);
}
}
@Override
public void expunge() throws MessagingException {
open(OPEN_MODE_RW);
checkOpen();
try {
executeSimpleCommand("EXPUNGE");
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
private String combineFlags(Iterable<Flag> flags) {
List<String> flagNames = new ArrayList<String>();
for (Flag flag : flags) {
if (flag == Flag.SEEN) {
flagNames.add("\\Seen");
} else if (flag == Flag.DELETED) {
flagNames.add("\\Deleted");
} else if (flag == Flag.ANSWERED) {
flagNames.add("\\Answered");
} else if (flag == Flag.FLAGGED) {
flagNames.add("\\Flagged");
} else if (flag == Flag.FORWARDED
&& (mCanCreateKeywords || mPermanentFlagsIndex.contains(Flag.FORWARDED))) {
flagNames.add("$Forwarded");
}
}
return combine(flagNames.toArray(new String[flagNames.size()]), ' ');
}
@Override
public void setFlags(Set<Flag> flags, boolean value)
throws MessagingException {
open(OPEN_MODE_RW);
checkOpen();
try {
executeSimpleCommand(String.format("UID STORE 1:* %sFLAGS.SILENT (%s)",
value ? "+" : "-", combineFlags(flags)));
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
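// Advances the stored push state when a newly seen message carries a UID at or beyond the
// expected UIDNEXT; returns null if the state doesn't need to change.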
@Override
public String getNewPushState(String oldPushStateS, Message message) {
try {
String messageUidS = message.getUid();
long messageUid = Long.parseLong(messageUidS);
ImapPushState oldPushState = ImapPushState.parse(oldPushStateS);
if (messageUid >= oldPushState.uidNext) {
long uidNext = messageUid + 1;
ImapPushState newPushState = new ImapPushState(uidNext);
return newPushState.toString();
} else {
return null;
}
} catch (Exception e) {
Log.e(LOG_TAG, "Exception while updated push state for " + getLogId(), e);
return null;
}
}
@Override
public void setFlags(List<? extends Message> messages, final Set<Flag> flags, boolean value)
throws MessagingException {
open(OPEN_MODE_RW);
checkOpen();
String[] uids = new String[messages.size()];
for (int i = 0, count = messages.size(); i < count; i++) {
uids[i] = messages.get(i).getUid();
}
try {
executeSimpleCommand(String.format("UID STORE %s %sFLAGS.SILENT (%s)",
combine(uids, ','),
value ? "+" : "-",
combineFlags(flags)));
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
private void checkOpen() throws MessagingException {
if (!isOpen()) {
throw new MessagingException("Folder " + getPrefixedName() + " is not open.");
}
}
private MessagingException ioExceptionHandler(ImapConnection connection, IOException ioe) {
Log.e(LOG_TAG, "IOException for " + getLogId(), ioe);
if (connection != null) {
connection.close();
}
close();
return new MessagingException("IO Error", ioe);
}
@Override
public boolean equals(Object o) {
if (o instanceof ImapFolder) {
return ((ImapFolder)o).getName().equalsIgnoreCase(getName());
}
return super.equals(o);
}
@Override
public int hashCode() {
return getName().hashCode();
}
protected ImapStore getStore() {
return store;
}
protected String getLogId() {
String id = mStoreConfig.toString() + ":" + getName() + "/" + Thread.currentThread().getName();
if (mConnection != null) {
id += "/" + mConnection.getLogId();
}
return id;
}
/**
* Search the remote ImapFolder.
* @param queryString String to query for.
* @param requiredFlags Mandatory flags
* @param forbiddenFlags Flags to exclude
* @return List of messages found
* @throws MessagingException On any error.
*/
@Override
public List<ImapMessage> search(final String queryString, final Set<Flag> requiredFlags, final Set<Flag> forbiddenFlags)
throws MessagingException {
if (!mStoreConfig.allowRemoteSearch()) {
throw new MessagingException("Your settings do not allow remote searching of this account");
}
// Setup the searcher
final ImapSearcher searcher = new ImapSearcher() {
@Override
public List<ImapResponse> search() throws IOException, MessagingException {
String imapQuery = "UID SEARCH ";
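// Required/forbidden flags become IMAP search keys (e.g. SEEN / UNSEEN); the query term is
// then matched against TEXT, or against SUBJECT and FROM, depending on the full-text setting.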
if (requiredFlags != null) {
for (Flag f : requiredFlags) {
switch (f) {
case DELETED:
imapQuery += "DELETED ";
break;
case SEEN:
imapQuery += "SEEN ";
break;
case ANSWERED:
imapQuery += "ANSWERED ";
break;
case FLAGGED:
imapQuery += "FLAGGED ";
break;
case DRAFT:
imapQuery += "DRAFT ";
break;
case RECENT:
imapQuery += "RECENT ";
break;
default:
break;
}
}
}
if (forbiddenFlags != null) {
for (Flag f : forbiddenFlags) {
switch (f) {
case DELETED:
imapQuery += "UNDELETED ";
break;
case SEEN:
imapQuery += "UNSEEN ";
break;
case ANSWERED:
imapQuery += "UNANSWERED ";
break;
case FLAGGED:
imapQuery += "UNFLAGGED ";
break;
case DRAFT:
imapQuery += "UNDRAFT ";
break;
case RECENT:
imapQuery += "UNRECENT ";
break;
default:
break;
}
}
}
final String encodedQry = encodeString(queryString);
if (mStoreConfig.isRemoteSearchFullText()) {
imapQuery += "TEXT " + encodedQry;
} else {
imapQuery += "OR SUBJECT " + encodedQry + " FROM " + encodedQry;
}
return executeSimpleCommand(imapQuery);
}
};
// Execute the search
try {
open(OPEN_MODE_RO);
checkOpen();
mInSearch = true;
// don't pass listener--we don't want to add messages until we've downloaded them
return search(searcher, null);
} finally {
mInSearch = false;
}
}
}
protected static class ImapMessage extends MimeMessage {
ImapMessage(String uid, Folder folder) {
this.mUid = uid;
this.mFolder = folder;
}
public void setSize(int size) {
this.mSize = size;
}
public void setFlagInternal(Flag flag, boolean set) throws MessagingException {
super.setFlag(flag, set);
}
@Override
public void setFlag(Flag flag, boolean set) throws MessagingException {
super.setFlag(flag, set);
mFolder.setFlags(Collections.singletonList(this), Collections.singleton(flag), set);
}
@Override
public void delete(String trashFolderName) throws MessagingException {
getFolder().delete(Collections.singletonList(this), trashFolderName);
}
}
protected class ImapFolderPusher extends ImapFolder implements UntaggedHandler {
private final PushReceiver receiver;
private Thread listeningThread = null;
private final AtomicBoolean stop = new AtomicBoolean(false);
private final AtomicBoolean idling = new AtomicBoolean(false);
private final AtomicBoolean doneSent = new AtomicBoolean(false);
private final AtomicInteger delayTime = new AtomicInteger(NORMAL_DELAY_TIME);
private final AtomicInteger idleFailureCount = new AtomicInteger(0);
private final AtomicBoolean needsPoll = new AtomicBoolean(false);
private List<ImapResponse> storedUntaggedResponses = new ArrayList<ImapResponse>();
private TracingWakeLock wakeLock = null;
public ImapFolderPusher(ImapStore store, String name, PushReceiver nReceiver) {
super(store, name);
receiver = nReceiver;
TracingPowerManager pm = TracingPowerManager.getPowerManager(receiver.getContext());
wakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "ImapFolderPusher " + mStoreConfig.toString() + ":" + getName());
wakeLock.setReferenceCounted(false);
}
public void refresh() throws IOException, MessagingException {
if (idling.get()) {
wakeLock.acquire(PUSH_WAKE_LOCK_TIMEOUT);
sendDone();
}
}
private void sendDone() throws IOException, MessagingException {
if (doneSent.compareAndSet(false, true)) {
ImapConnection conn = mConnection;
if (conn != null) {
conn.setReadTimeout(SOCKET_READ_TIMEOUT);
sendContinuation("DONE");
}
}
}
private void sendContinuation(String continuation)
throws IOException {
ImapConnection conn = mConnection;
if (conn != null) {
conn.sendContinuation(continuation);
}
}
public void start() {
Runnable runner = new Runnable() {
@Override
public void run() {
wakeLock.acquire(PUSH_WAKE_LOCK_TIMEOUT);
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Pusher starting for " + getLogId());
long lastUidNext = -1L;
while (!stop.get()) {
try {
long oldUidNext = -1L;
try {
String pushStateS = receiver.getPushState(getName());
ImapPushState pushState = ImapPushState.parse(pushStateS);
oldUidNext = pushState.uidNext;
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Got oldUidNext " + oldUidNext + " for " + getLogId());
} catch (Exception e) {
Log.e(LOG_TAG, "Unable to get oldUidNext for " + getLogId(), e);
}
/*
* This makes sure 'oldUidNext' is never smaller than 'UIDNEXT' from
* the last loop iteration. This way we avoid looping endlessly causing
* the battery to drain.
*
* See issue 4907
*/
if (oldUidNext < lastUidNext) {
oldUidNext = lastUidNext;
}
ImapConnection oldConnection = mConnection;
internalOpen(OPEN_MODE_RO);
ImapConnection conn = mConnection;
if (conn == null) {
receiver.pushError("Could not establish connection for IDLE", null);
throw new MessagingException("Could not establish connection for IDLE");
}
if (!conn.isIdleCapable()) {
stop.set(true);
receiver.pushError("IMAP server is not IDLE capable: " + conn.toString(), null);
throw new MessagingException("IMAP server is not IDLE capable:" + conn.toString());
}
if (!stop.get() && mStoreConfig.isPushPollOnConnect() && (conn != oldConnection || needsPoll.getAndSet(false))) {
List<ImapResponse> untaggedResponses = new ArrayList<ImapResponse>(storedUntaggedResponses);
storedUntaggedResponses.clear();
processUntaggedResponses(untaggedResponses);
if (mMessageCount == -1) {
throw new MessagingException("Message count = -1 for idling");
}
receiver.syncFolder(ImapFolderPusher.this);
}
if (stop.get()) {
continue;
}
long startUid = oldUidNext;
long newUidNext = uidNext;
if (newUidNext == -1) {
if (K9MailLib.isDebug()) {
Log.d(LOG_TAG, "uidNext is -1, using search to find highest UID");
}
long highestUid = getHighestUid();
if (highestUid != -1L) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "highest UID = " + highestUid);
newUidNext = highestUid + 1;
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "highest UID = " + highestUid
+ ", set newUidNext to " + newUidNext);
}
}
if (startUid < newUidNext - mStoreConfig.getDisplayCount()) {
startUid = newUidNext - mStoreConfig.getDisplayCount();
}
if (startUid < 1) {
startUid = 1;
}
lastUidNext = newUidNext;
if (newUidNext > startUid) {
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Needs sync from uid " + startUid + " to " + newUidNext + " for " + getLogId());
List<Message> messages = new ArrayList<Message>();
for (long uid = startUid; uid < newUidNext; uid++) {
ImapMessage message = new ImapMessage("" + uid, ImapFolderPusher.this);
messages.add(message);
}
if (!messages.isEmpty()) {
pushMessages(messages, true);
}
} else {
List<ImapResponse> untaggedResponses;
while (!storedUntaggedResponses.isEmpty()) {
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Processing " + storedUntaggedResponses.size() + " untagged responses from previous commands for " + getLogId());
untaggedResponses = new ArrayList<ImapResponse>(storedUntaggedResponses);
storedUntaggedResponses.clear();
processUntaggedResponses(untaggedResponses);
}
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "About to IDLE for " + getLogId());
receiver.setPushActive(getName(), true);
idling.set(true);
doneSent.set(false);
conn.setReadTimeout((mStoreConfig.getIdleRefreshMinutes() * 60 * 1000) + IDLE_READ_TIMEOUT_INCREMENT);
executeSimpleCommand(ImapCommands.COMMAND_IDLE, false, ImapFolderPusher.this);
idling.set(false);
delayTime.set(NORMAL_DELAY_TIME);
idleFailureCount.set(0);
}
} catch (Exception e) {
wakeLock.acquire(PUSH_WAKE_LOCK_TIMEOUT);
storedUntaggedResponses.clear();
idling.set(false);
receiver.setPushActive(getName(), false);
try {
close();
} catch (Exception me) {
Log.e(LOG_TAG, "Got exception while closing for exception for " + getLogId(), me);
}
if (stop.get()) {
Log.i(LOG_TAG, "Got exception while idling, but stop is set for " + getLogId());
} else {
receiver.pushError("Push error for " + getName(), e);
Log.e(LOG_TAG, "Got exception while idling for " + getLogId(), e);
int delayTimeInt = delayTime.get();
receiver.sleep(wakeLock, delayTimeInt);
delayTimeInt *= 2;
if (delayTimeInt > MAX_DELAY_TIME) {
delayTimeInt = MAX_DELAY_TIME;
}
delayTime.set(delayTimeInt);
if (idleFailureCount.incrementAndGet() > IDLE_FAILURE_COUNT_LIMIT) {
Log.e(LOG_TAG, "Disabling pusher for " + getLogId() + " after " + idleFailureCount.get() + " consecutive errors");
receiver.pushError("Push disabled for " + getName() + " after " + idleFailureCount.get() + " consecutive errors", e);
stop.set(true);
}
}
}
}
receiver.setPushActive(getName(), false);
try {
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Pusher for " + getLogId() + " is exiting");
close();
} catch (Exception me) {
Log.e(LOG_TAG, "Got exception while closing for " + getLogId(), me);
} finally {
wakeLock.release();
}
}
};
listeningThread = new Thread(runner);
listeningThread.start();
}
@Override
protected void handleUntaggedResponse(ImapResponse response) {
if (response.getTag() == null && response.size() > 1) {
Object responseType = response.get(1);
if (ImapResponseParser.equalsIgnoreCase(responseType, "FETCH")
|| ImapResponseParser.equalsIgnoreCase(responseType, "EXPUNGE")
|| ImapResponseParser.equalsIgnoreCase(responseType, "EXISTS")) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Storing response " + response + " for later processing");
storedUntaggedResponses.add(response);
}
handlePossibleUidNext(response);
}
}
protected void processUntaggedResponses(List<ImapResponse> responses) throws MessagingException {
boolean skipSync = false;
int oldMessageCount = mMessageCount;
if (oldMessageCount == -1) {
skipSync = true;
}
List<Long> flagSyncMsgSeqs = new ArrayList<Long>();
List<String> removeMsgUids = new LinkedList<String>();
for (ImapResponse response : responses) {
oldMessageCount += processUntaggedResponse(oldMessageCount, response, flagSyncMsgSeqs, removeMsgUids);
}
if (!skipSync) {
if (oldMessageCount < 0) {
oldMessageCount = 0;
}
if (mMessageCount > oldMessageCount) {
syncMessages(mMessageCount, true);
}
}
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "UIDs for messages needing flag sync are " + flagSyncMsgSeqs + " for " + getLogId());
if (!flagSyncMsgSeqs.isEmpty()) {
syncMessages(flagSyncMsgSeqs);
}
if (!removeMsgUids.isEmpty()) {
removeMessages(removeMsgUids);
}
}
private void syncMessages(int end, boolean newArrivals) throws MessagingException {
long oldUidNext = -1L;
try {
String pushStateS = receiver.getPushState(getName());
ImapPushState pushState = ImapPushState.parse(pushStateS);
oldUidNext = pushState.uidNext;
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Got oldUidNext " + oldUidNext + " for " + getLogId());
} catch (Exception e) {
Log.e(LOG_TAG, "Unable to get oldUidNext for " + getLogId(), e);
}
List<? extends Message> messageList = getMessages(end, end, null, true, null);
if (messageList != null && messageList.size() > 0) {
long newUid = Long.parseLong(messageList.get(0).getUid());
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Got newUid " + newUid + " for message " + end + " on " + getLogId());
long startUid = oldUidNext;
if (startUid < newUid - 10) {
startUid = newUid - 10;
}
if (startUid < 1) {
startUid = 1;
}
if (newUid >= startUid) {
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Needs sync from uid " + startUid + " to " + newUid + " for " + getLogId());
List<Message> messages = new ArrayList<Message>();
for (long uid = startUid; uid <= newUid; uid++) {
ImapMessage message = new ImapMessage(Long.toString(uid), ImapFolderPusher.this);
messages.add(message);
}
if (!messages.isEmpty()) {
pushMessages(messages, true);
}
}
}
}
private void syncMessages(List<Long> flagSyncMsgSeqs) {
try {
List<? extends Message> messageList = getMessages(flagSyncMsgSeqs, true, null);
List<Message> messages = new ArrayList<Message>();
messages.addAll(messageList);
pushMessages(messages, false);
} catch (Exception e) {
receiver.pushError("Exception while processing Push untagged responses", e);
}
}
private void removeMessages(List<String> removeUids) {
List<Message> messages = new ArrayList<Message>(removeUids.size());
try {
List<? extends Message> existingMessages = getMessagesFromUids(removeUids, true, null);
for (Message existingMessage : existingMessages) {
needsPoll.set(true);
msgSeqUidMap.clear();
String existingUid = existingMessage.getUid();
Log.w(LOG_TAG, "Message with UID " + existingUid + " still exists on server, not expunging");
removeUids.remove(existingUid);
}
for (String uid : removeUids) {
ImapMessage message = new ImapMessage(uid, this);
try {
message.setFlagInternal(Flag.DELETED, true);
} catch (MessagingException me) {
Log.e(LOG_TAG, "Unable to set DELETED flag on message " + message.getUid());
}
messages.add(message);
}
receiver.messagesRemoved(this, messages);
} catch (Exception e) {
Log.e(LOG_TAG, "Cannot remove EXPUNGEd messages", e);
}
}
protected int processUntaggedResponse(long oldMessageCount, ImapResponse response, List<Long> flagSyncMsgSeqs, List<String> removeMsgUids) {
super.handleUntaggedResponse(response);
int messageCountDelta = 0;
if (response.getTag() == null && response.size() > 1) {
try {
Object responseType = response.get(1);
if (ImapResponseParser.equalsIgnoreCase(responseType, "FETCH")) {
Log.i(LOG_TAG, "Got FETCH " + response);
long msgSeq = response.getLong(0);
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Got untagged FETCH for msgseq " + msgSeq + " for " + getLogId());
if (!flagSyncMsgSeqs.contains(msgSeq)) {
flagSyncMsgSeqs.add(msgSeq);
}
}
if (ImapResponseParser.equalsIgnoreCase(responseType, "EXPUNGE")) {
long msgSeq = response.getLong(0);
if (msgSeq <= oldMessageCount) {
messageCountDelta = -1;
}
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Got untagged EXPUNGE for msgseq " + msgSeq + " for " + getLogId());
List<Long> newSeqs = new ArrayList<Long>();
Iterator<Long> flagIter = flagSyncMsgSeqs.iterator();
while (flagIter.hasNext()) {
long flagMsg = flagIter.next();
if (flagMsg >= msgSeq) {
flagIter.remove();
if (flagMsg > msgSeq) {
newSeqs.add(flagMsg--);
}
}
}
flagSyncMsgSeqs.addAll(newSeqs);
List<Long> msgSeqs = new ArrayList<Long>(msgSeqUidMap.keySet());
Collections.sort(msgSeqs); // Have to do comparisons in order because of msgSeq reductions
for (long msgSeqNum : msgSeqs) {
if (K9MailLib.isDebug()) {
Log.v(LOG_TAG, "Comparing EXPUNGEd msgSeq " + msgSeq + " to " + msgSeqNum);
}
if (msgSeqNum == msgSeq) {
String uid = msgSeqUidMap.get(msgSeqNum);
if (K9MailLib.isDebug()) {
Log.d(LOG_TAG, "Scheduling removal of UID " + uid + " because msgSeq " + msgSeqNum + " was expunged");
}
removeMsgUids.add(uid);
msgSeqUidMap.remove(msgSeqNum);
} else if (msgSeqNum > msgSeq) {
String uid = msgSeqUidMap.get(msgSeqNum);
if (K9MailLib.isDebug()) {
Log.d(LOG_TAG, "Reducing msgSeq for UID " + uid + " from " + msgSeqNum + " to " + (msgSeqNum - 1));
}
msgSeqUidMap.remove(msgSeqNum);
msgSeqUidMap.put(msgSeqNum - 1, uid);
}
}
}
} catch (Exception e) {
Log.e(LOG_TAG, "Could not handle untagged FETCH for " + getLogId(), e);
}
}
return messageCountDelta;
}
private void pushMessages(List<Message> messages, boolean newArrivals) {
RuntimeException holdException = null;
try {
if (newArrivals) {
receiver.messagesArrived(this, messages);
} else {
receiver.messagesFlagsChanged(this, messages);
}
} catch (RuntimeException e) {
holdException = e;
}
if (holdException != null) {
throw holdException;
}
}
public void stop() {
stop.set(true);
if (listeningThread != null) {
listeningThread.interrupt();
}
ImapConnection conn = mConnection;
if (conn != null) {
if (K9MailLib.isDebug())
Log.v(LOG_TAG, "Closing mConnection to stop pushing for " + getLogId());
conn.close();
} else {
Log.w(LOG_TAG, "Attempt to interrupt null mConnection to stop pushing on folderPusher for " + getLogId());
}
}
@Override
public void handleAsyncUntaggedResponse(ImapResponse response) {
if (K9MailLib.isDebug())
Log.v(LOG_TAG, "Got async response: " + response);
if (stop.get()) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Got async untagged response: " + response + ", but stop is set for " + getLogId());
try {
sendDone();
} catch (Exception e) {
Log.e(LOG_TAG, "Exception while sending DONE for " + getLogId(), e);
}
} else {
if (response.getTag() == null) {
if (response.size() > 1) {
boolean started = false;
Object responseType = response.get(1);
if (ImapResponseParser.equalsIgnoreCase(responseType, "EXISTS") || ImapResponseParser.equalsIgnoreCase(responseType, "EXPUNGE") ||
ImapResponseParser.equalsIgnoreCase(responseType, "FETCH")) {
if (!started) {
wakeLock.acquire(PUSH_WAKE_LOCK_TIMEOUT);
started = true;
}
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Got useful async untagged response: " + response + " for " + getLogId());
try {
sendDone();
} catch (Exception e) {
Log.e(LOG_TAG, "Exception while sending DONE for " + getLogId(), e);
}
}
} else if (response.isContinuationRequested()) {
if (K9MailLib.isDebug())
Log.d(LOG_TAG, "Idling " + getLogId());
wakeLock.release();
}
}
}
}
}
@Override
public Pusher getPusher(PushReceiver receiver) {
return new ImapPusher(this, receiver);
}
public class ImapPusher implements Pusher {
private final ImapStore mStore;
final PushReceiver mReceiver;
private long lastRefresh = -1;
final Map<String, ImapFolderPusher> folderPushers = new HashMap<String, ImapFolderPusher>();
public ImapPusher(ImapStore store, PushReceiver receiver) {
mStore = store;
mReceiver = receiver;
}
@Override
public void start(List<String> folderNames) {
stop();
synchronized (folderPushers) {
setLastRefresh(System.currentTimeMillis());
for (String folderName : folderNames) {
ImapFolderPusher pusher = folderPushers.get(folderName);
if (pusher == null) {
pusher = new ImapFolderPusher(mStore, folderName, mReceiver);
folderPushers.put(folderName, pusher);
pusher.start();
}
}
}
}
@Override
public void refresh() {
synchronized (folderPushers) {
for (ImapFolderPusher folderPusher : folderPushers.values()) {
try {
folderPusher.refresh();
} catch (Exception e) {
Log.e(LOG_TAG, "Got exception while refreshing for " + folderPusher.getName(), e);
}
}
}
}
@Override
public void stop() {
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Requested stop of IMAP pusher");
synchronized (folderPushers) {
for (ImapFolderPusher folderPusher : folderPushers.values()) {
try {
if (K9MailLib.isDebug())
Log.i(LOG_TAG, "Requesting stop of IMAP folderPusher " + folderPusher.getName());
folderPusher.stop();
} catch (Exception e) {
Log.e(LOG_TAG, "Got exception while stopping " + folderPusher.getName(), e);
}
}
folderPushers.clear();
}
}
@Override
public int getRefreshInterval() {
return (mStoreConfig.getIdleRefreshMinutes() * 60 * 1000);
}
@Override
public long getLastRefresh() {
return lastRefresh;
}
@Override
public void setLastRefresh(long lastRefresh) {
this.lastRefresh = lastRefresh;
}
}
protected static class ImapPushState {
protected long uidNext;
protected ImapPushState(long nUidNext) {
uidNext = nUidNext;
}
protected static ImapPushState parse(String pushState) {
long newUidNext = -1L;
if (pushState != null) {
StringTokenizer tokenizer = new StringTokenizer(pushState, ";");
while (tokenizer.hasMoreTokens()) {
StringTokenizer thisState = new StringTokenizer(tokenizer.nextToken(), "=");
if (thisState.hasMoreTokens()) {
String key = thisState.nextToken();
if ("uidNext".equalsIgnoreCase(key) && thisState.hasMoreTokens()) {
String value = thisState.nextToken();
try {
newUidNext = Long.parseLong(value);
} catch (NumberFormatException e) {
Log.e(LOG_TAG, "Unable to part uidNext value " + value, e);
}
}
}
}
}
return new ImapPushState(newUidNext);
}
@Override
public String toString() {
return "uidNext=" + uidNext;
}
}
protected interface ImapSearcher {
List<ImapResponse> search() throws IOException, MessagingException;
}
private static class FetchBodyCallback implements ImapResponseCallback {
private Map<String, Message> mMessageMap;
FetchBodyCallback(Map<String, Message> messageMap) {
mMessageMap = messageMap;
}
@Override
public Object foundLiteral(ImapResponse response,
FixedLengthInputStream literal) throws MessagingException, IOException {
if (response.getTag() == null &&
ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH")) {
ImapList fetchList = (ImapList)response.getKeyedValue("FETCH");
String uid = fetchList.getKeyedString("UID");
ImapMessage message = (ImapMessage) mMessageMap.get(uid);
message.parse(literal);
// Return placeholder object
return 1;
}
return null;
}
}
private static class FetchPartCallback implements ImapResponseCallback {
private Part mPart;
FetchPartCallback(Part part) {
mPart = part;
}
@Override
public Object foundLiteral(ImapResponse response,
FixedLengthInputStream literal) throws MessagingException, IOException {
if (response.getTag() == null &&
ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH")) {
//TODO: check for correct UID
String contentTransferEncoding = mPart
.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING)[0];
String contentType = mPart
.getHeader(MimeHeader.HEADER_CONTENT_TYPE)[0];
return MimeUtility.createBody(literal, contentTransferEncoding,
contentType);
}
return null;
}
}
private static String combine(Object[] parts, char separator) {
if (parts == null) {
return null;
}
return TextUtils.join(String.valueOf(separator), parts);
}
private class StoreImapSettings implements ImapSettings {
@Override
public String getHost() {
return mHost;
}
@Override
public int getPort() {
return mPort;
}
@Override
public ConnectionSecurity getConnectionSecurity() {
return mConnectionSecurity;
}
@Override
public AuthType getAuthType() {
return mAuthType;
}
@Override
public String getUsername() {
return mUsername;
}
@Override
public String getPassword() {
return mPassword;
}
@Override
public String getClientCertificateAlias() {
return mClientCertificateAlias;
}
@Override
public boolean useCompression(final int type) {
return mStoreConfig.useCompression(type);
}
@Override
public String getPathPrefix() {
return mPathPrefix;
}
@Override
public void setPathPrefix(String prefix) {
mPathPrefix = prefix;
}
@Override
public String getPathDelimiter() {
return mPathDelimiter;
}
@Override
public void setPathDelimiter(String delimiter) {
mPathDelimiter = delimiter;
}
@Override
public String getCombinedPrefix() {
return mCombinedPrefix;
}
@Override
public void setCombinedPrefix(String prefix) {
mCombinedPrefix = prefix;
}
}
}
| 1 | 12,972 | With the introduction of the enum(s) the constants in the store classes should be removed. | k9mail-k-9 | java |
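To make the review comment above concrete, here is a minimal, hypothetical Java sketch of the suggested clean-up; the class, constant and enum names are invented for illustration and are not taken from the K-9 patch itself. The idea is simply that once a shared enum expresses the states, per-class integer constants duplicate it and can be deleted.

// Hypothetical before/after sketch of "constants replaced by an enum".
public class EnumVsConstantsSketch {

    // Before: each store class carries its own copy of the value.
    static final int OPEN_MODE_RW = 0;
    static final int OPEN_MODE_RO = 1;

    // After: one enum expresses the same states for every store class,
    // so the constants above become redundant and can be removed.
    enum OpenMode { READ_WRITE, READ_ONLY }

    static boolean isReadOnly(OpenMode mode) {
        return mode == OpenMode.READ_ONLY;
    }

    public static void main(String[] args) {
        System.out.println(isReadOnly(OpenMode.READ_ONLY)); // prints: true
    }
}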
@@ -455,6 +455,12 @@ static CALI_BPF_INLINE int calico_tc(struct __sk_buff *skb)
}
}
+ // Drop packets with IP options
+ if (ip_header->ihl > 5) {
+ fwd.reason = CALI_REASON_IP_OPTIONS;
+ CALI_DEBUG("Drop packets with IP options\n");
+ goto deny;
+ }
// Setting all of these up-front to keep the verifier happy.
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1); | 1 | // Project Calico BPF dataplane programs.
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#include <asm/types.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/if_ether.h>
#include <iproute2/bpf_elf.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "bpf.h"
#include "log.h"
#include "skb.h"
#include "policy.h"
#include "conntrack.h"
#include "nat.h"
#include "routes.h"
#include "jump.h"
#include "reasons.h"
#include "icmp.h"
#ifndef CALI_FIB_LOOKUP_ENABLED
#define CALI_FIB_LOOKUP_ENABLED true
#endif
#ifndef CALI_DROP_WORKLOAD_TO_HOST
#define CALI_DROP_WORKLOAD_TO_HOST false
#endif
#ifdef CALI_DEBUG_ALLOW_ALL
/* If we want to just compile the code without defining any policies and to
* avoid compiling out code paths that are not reachable if traffic is denied,
* we can compile it with allow all
*/
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, allow);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#else
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, deny);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#endif /* CALI_DEBUG_ALLOW_ALL */
__attribute__((section("1/0")))
int calico_tc_norm_pol_tail(struct __sk_buff *skb)
{
CALI_DEBUG("Entering normal policy tail call\n");
__u32 key = 0;
struct cali_tc_state *state = cali_v4_state_lookup_elem(&key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
state->pol_rc = execute_policy_norm(skb, state->ip_proto, state->ip_src,
state->ip_dst, state->sport, state->dport);
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to post-policy program failed: DROP\n");
deny:
return TC_ACT_SHOT;
}
struct fwd {
int res;
uint32_t mark;
enum calico_reason reason;
#if FIB_ENABLED
uint32_t fib_flags;
bool fib;
#endif
};
#if FIB_ENABLED
#define fwd_fib(fwd) ((fwd)->fib)
#define fwd_fib_set(fwd, v) ((fwd)->fib = v)
#define fwd_fib_set_flags(fwd, flags) ((fwd)->fib_flags = flags)
#else
#define fwd_fib(fwd) false
#define fwd_fib_set(fwd, v)
#define fwd_fib_set_flags(fwd, flags)
#endif
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest);
static CALI_BPF_INLINE int skb_nat_l4_csum_ipv4(struct __sk_buff *skb, size_t off,
__be32 ip_from, __be32 ip_to,
__u16 port_from, __u16 port_to,
uint64_t flags)
{
int ret = 0;
if (ip_from != ip_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) IP from %x to %x\n", off,
be32_to_host(ip_from), be32_to_host(ip_to));
ret = bpf_l4_csum_replace(skb, off, ip_from, ip_to, flags | BPF_F_PSEUDO_HDR | 4);
CALI_DEBUG("bpf_l4_csum_replace(IP): %d\n", ret);
}
if (port_from != port_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) port from %d to %d\n",
off, be16_to_host(port_from), be16_to_host(port_to));
int rc = bpf_l4_csum_replace(skb, off, port_from, port_to, flags | 2);
CALI_DEBUG("bpf_l4_csum_replace(port): %d\n", rc);
ret |= rc;
}
return ret;
}
static CALI_BPF_INLINE int forward_or_drop(struct __sk_buff *skb,
struct cali_tc_state *state,
struct fwd *fwd)
{
int rc = fwd->res;
enum calico_reason reason = fwd->reason;
if (rc == TC_ACT_SHOT) {
goto deny;
}
if (rc == CALI_RES_REDIR_IFINDEX) {
int redir_flags = 0;
if (CALI_F_FROM_HOST) {
redir_flags = BPF_F_INGRESS;
}
/* Revalidate the access to the packet */
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
/* Swap the MACs as we are turning it back */
struct ethhdr *eth_hdr = (void *)(long)skb->data;
unsigned char mac[ETH_ALEN];
__builtin_memcpy(mac, ð_hdr->h_dest, ETH_ALEN);
__builtin_memcpy(ð_hdr->h_dest, ð_hdr->h_source, ETH_ALEN);
__builtin_memcpy(ð_hdr->h_source, mac, ETH_ALEN);
rc = bpf_redirect(skb->ifindex, redir_flags);
if (rc == TC_ACT_REDIRECT) {
CALI_DEBUG("Redirect to the same interface (%d) succeeded\n", skb->ifindex);
goto skip_fib;
}
CALI_DEBUG("Redirect to the same interface (%d) failed\n", skb->ifindex);
goto deny;
}
#if FIB_ENABLED
// Try a short-circuit FIB lookup.
if (fwd_fib(fwd)) {
/* XXX we might include the tot_len in the fwd, set it once when
* we get the ip_header the first time and only adjust the value
		 * when we modify the packet - to avoid getting the header here
* again - it is simpler though.
*/
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
struct iphdr *ip_header = skb_iphdr(skb);
struct bpf_fib_lookup fib_params = {
.family = 2, /* AF_INET */
.tot_len = be16_to_host(ip_header->tot_len),
.ifindex = skb->ingress_ifindex,
.l4_protocol = state->ip_proto,
.sport = host_to_be16(state->sport),
.dport = host_to_be16(state->dport),
};
/* set the ipv4 here, otherwise the ipv4/6 unions do not get
* zeroed properly
*/
fib_params.ipv4_src = state->ip_src;
fib_params.ipv4_dst = state->ip_dst;
CALI_DEBUG("FIB family=%d\n", fib_params.family);
CALI_DEBUG("FIB tot_len=%d\n", fib_params.tot_len);
CALI_DEBUG("FIB ifindex=%d\n", fib_params.ifindex);
CALI_DEBUG("FIB l4_protocol=%d\n", fib_params.l4_protocol);
CALI_DEBUG("FIB sport=%d\n", be16_to_host(fib_params.sport));
CALI_DEBUG("FIB dport=%d\n", be16_to_host(fib_params.dport));
CALI_DEBUG("FIB ipv4_src=%x\n", be32_to_host(fib_params.ipv4_src));
CALI_DEBUG("FIB ipv4_dst=%x\n", be32_to_host(fib_params.ipv4_dst));
CALI_DEBUG("Traffic is towards the host namespace, doing Linux FIB lookup\n");
rc = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params), fwd->fib_flags);
if (rc == 0) {
CALI_DEBUG("FIB lookup succeeded\n");
/* Since we are going to short circuit the IP stack on
* forward, check if TTL is still alive. If not, let the
* IP stack handle it. It was approved by policy, so it
* is safe.
*/
			if (ip_ttl_exceeded(ip_header)) {
rc = TC_ACT_UNSPEC;
goto cancel_fib;
}
// Update the MACs. NAT may have invalidated pointer into the packet so need to
// revalidate.
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
struct ethhdr *eth_hdr = (void *)(long)skb->data;
__builtin_memcpy(ð_hdr->h_source, fib_params.smac, sizeof(eth_hdr->h_source));
__builtin_memcpy(ð_hdr->h_dest, fib_params.dmac, sizeof(eth_hdr->h_dest));
// Redirect the packet.
CALI_DEBUG("Got Linux FIB hit, redirecting to iface %d.\n", fib_params.ifindex);
rc = bpf_redirect(fib_params.ifindex, 0);
/* now we know we will bypass IP stack and ip->ttl > 1, decrement it! */
if (rc == TC_ACT_REDIRECT) {
ip_dec_ttl(ip_header);
}
} else if (rc < 0) {
CALI_DEBUG("FIB lookup failed (bad input): %d.\n", rc);
rc = TC_ACT_UNSPEC;
} else {
CALI_DEBUG("FIB lookup failed (FIB problem): %d.\n", rc);
rc = TC_ACT_UNSPEC;
}
}
cancel_fib:
#endif /* FIB_ENABLED */
skip_fib:
if (CALI_F_TO_HOST) {
/* Packet is towards host namespace, mark it so that downstream
* programs know that they're not the first to see the packet.
*/
CALI_DEBUG("Traffic is towards host namespace, marking with %x.\n", fwd->mark);
/* FIXME: this ignores the mask that we should be using.
* However, if we mask off the bits, then clang spots that it
* can do a 16-bit store instead of a 32-bit load/modify/store,
* which trips up the validator.
*/
skb->mark = fwd->mark | CALI_SKB_MARK_SEEN; /* make sure that each pkt has SEEN mark */
}
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=ALLOW (%d). Program execution time: %lluns\n",
rc, prog_end_time-state->prog_start_time);
}
return rc;
deny:
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=DENY (%x). Program execution time: %lluns\n",
reason, prog_end_time-state->prog_start_time);
}
return TC_ACT_SHOT;
}
static CALI_BPF_INLINE int calico_tc(struct __sk_buff *skb)
{
struct cali_tc_state state = {};
struct fwd fwd = {
.res = TC_ACT_UNSPEC,
.reason = CALI_REASON_UNKNOWN,
};
struct calico_nat_dest *nat_dest = NULL;
/* we assume we do FIB and from this point on, we only set it to false
* if we decide not to do it.
*/
fwd_fib_set(&fwd, true);
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
state.prog_start_time = bpf_ktime_get_ns();
}
state.nat_tun_src = 0;
#ifdef CALI_SET_SKB_MARK
/* workaround for test since bpftool run cannot set it in context, wont
* be necessary if fixed in kernel
*/
skb->mark = CALI_SET_SKB_MARK;
#endif
if (!CALI_F_TO_HOST && skb->mark == CALI_SKB_MARK_BYPASS) {
CALI_DEBUG("Packet pre-approved by another hook, allow.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
}
struct iphdr *ip_header;
if (CALI_F_TO_HEP || CALI_F_TO_WEP) {
switch (skb->mark) {
case CALI_SKB_MARK_BYPASS_FWD:
CALI_DEBUG("Packet approved for forward.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
case CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP:
CALI_DEBUG("Packet approved for forward - src ip fixup\n");
fwd.reason = CALI_REASON_BYPASS;
/* we need to fix up the right src host IP */
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__be32 ip_src = ip_header->saddr;
if (ip_src == HOST_IP) {
CALI_DEBUG("src ip fixup not needed %x\n", be32_to_host(ip_src));
goto allow;
}
/* XXX do a proper CT lookup to find this */
ip_header->saddr = HOST_IP;
int l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
int res = bpf_l3_csum_replace(skb, l3_csum_off, ip_src, HOST_IP, 4);
if (res) {
fwd.reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
goto allow;
}
}
// Parse the packet.
// TODO Do we need to handle any odd-ball frames here (e.g. with a 0 VLAN header)?
switch (host_to_be16(skb->protocol)) {
case ETH_P_IP:
break;
case ETH_P_ARP:
CALI_DEBUG("ARP: allowing packet\n");
fwd_fib_set(&fwd, false);
goto allow;
case ETH_P_IPV6:
if (CALI_F_WEP) {
CALI_DEBUG("IPv6 from workload: drop\n");
return TC_ACT_SHOT;
} else {
// FIXME: support IPv6.
CALI_DEBUG("IPv6 on host interface: allow\n");
return TC_ACT_UNSPEC;
}
default:
if (CALI_F_WEP) {
CALI_DEBUG("Unknown ethertype (%x), drop\n", be16_to_host(skb->protocol));
goto deny;
} else {
CALI_DEBUG("Unknown ethertype on host interface (%x), allow\n",
be16_to_host(skb->protocol));
return TC_ACT_UNSPEC;
}
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
if (dnat_should_decap() && is_vxlan_tunnel(ip_header)) {
struct udphdr *udp_header = (void*)(ip_header+1);
/* decap on host ep only if directly for the node */
CALI_DEBUG("VXLAN tunnel packet to %x (host IP=%x)\n", ip_header->daddr, HOST_IP);
if (ip_header->daddr == HOST_IP &&
vxlan_udp_csum_ok(udp_header) &&
vxlan_size_ok(skb, udp_header) &&
vxlan_vni_is_valid(skb, udp_header) &&
vxlan_vni(skb, udp_header) == CALI_VXLAN_VNI) {
state.nat_tun_src = ip_header->saddr;
CALI_DEBUG("vxlan decap\n");
if (vxlan_v4_decap(skb)) {
fwd.reason = CALI_REASON_DECAP_FAIL;
goto deny;
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short after VXLAN decap\n");
goto deny;
}
ip_header = skb_iphdr(skb);
CALI_DEBUG("vxlan decap origin %x\n", be32_to_host(state.nat_tun_src));
}
}
// Setting all of these up-front to keep the verifier happy.
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
struct icmphdr *icmp_header = (void*)(ip_header+1);
tc_state_fill_from_iphdr(&state, ip_header);
switch (state.ip_proto) {
case IPPROTO_TCP:
// Re-check buffer space for TCP (has larger headers than UDP).
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
state.sport = be16_to_host(tcp_header->source);
state.dport = be16_to_host(tcp_header->dest);
CALI_DEBUG("TCP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_UDP:
state.sport = be16_to_host(udp_header->source);
state.dport = be16_to_host(udp_header->dest);
CALI_DEBUG("UDP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_ICMP:
icmp_header = (void*)(ip_header+1);
CALI_DEBUG("ICMP; type=%d code=%d\n",
icmp_header->type, icmp_header->code);
break;
case 4:
// IPIP
if (CALI_F_HEP) {
// TODO IPIP whitelist.
CALI_DEBUG("IPIP: allow\n");
fwd_fib_set(&fwd, false);
goto allow;
}
default:
CALI_DEBUG("Unknown protocol (%d), unable to extract ports\n", (int)state.ip_proto);
}
state.pol_rc = CALI_POL_NO_MATCH;
switch (state.ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_ICMP:
break;
default:
if (CALI_F_HEP) {
// FIXME: allow unknown protocols through on host endpoints.
goto allow;
}
// FIXME non-port based conntrack.
goto deny;
}
struct ct_ctx ct_lookup_ctx = {
.skb = skb,
.proto = state.ip_proto,
.src = state.ip_src,
.sport = state.sport,
.dst = state.ip_dst,
.dport = state.dport,
.nat_tun_src = state.nat_tun_src,
};
if (state.ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_lookup_ctx.tcp = tcp_header;
}
/* Do conntrack lookup before anything else */
state.ct_result = calico_ct_v4_lookup(&ct_lookup_ctx);
if (state.ct_result.flags & CALI_CT_FLAG_NAT_OUT) {
state.flags |= CALI_ST_NAT_OUTGOING;
}
/* We are possibly past (D)NAT, but that is ok, we need to let the IP
	 * stack do the RPF check on the source, dest is not important.
*/
if (CALI_F_TO_HOST && ct_result_rpf_failed(state.ct_result.rc)) {
fwd_fib_set(&fwd, false);
}
/* skip policy if we get conntrack hit */
if (ct_result_rc(state.ct_result.rc) != CALI_CT_NEW) {
goto skip_policy;
}
/* Unlike from WEP where we can do RPF by comparing to calico routing
* info, we must rely in Linux to do it for us when receiving packets
* from outside of the host. We enforce RPF failed on every new flow.
* This will make it to skip fib in calico_tc_skb_accepted()
*/
if (CALI_F_FROM_HEP) {
ct_result_set_flag(state.ct_result.rc, CALI_CT_RPF_FAILED);
}
/* No conntrack entry, check if we should do NAT */
nat_dest = calico_v4_nat_lookup2(state.ip_src, state.ip_dst,
state.ip_proto, state.dport,
state.nat_tun_src != 0);
if (nat_dest != NULL) {
state.post_nat_ip_dst = nat_dest->addr;
state.post_nat_dport = nat_dest->port;
} else {
state.post_nat_ip_dst = state.ip_dst;
state.post_nat_dport = state.dport;
}
if (CALI_F_TO_WEP &&
skb->mark != CALI_SKB_MARK_SEEN &&
cali_rt_flags_local_host(cali_rt_lookup_flags(state.ip_src))) {
/* Host to workload traffic always allowed. We discount traffic that was
* seen by another program since it must have come in via another interface.
*/
CALI_DEBUG("Packet is from the host: ACCEPT\n");
state.pol_rc = CALI_POL_ALLOW;
goto skip_policy;
}
if (CALI_F_FROM_WEP) {
/* Do RPF check since it's our responsibility to police that. */
CALI_DEBUG("Workload RPF check src=%x skb iface=%d.\n",
be32_to_host(state.ip_src), skb->ifindex);
struct cali_rt *r = cali_rt_lookup(state.ip_src);
if (!r) {
CALI_INFO("Workload RPF fail: missing route.\n");
goto deny;
}
if (!cali_rt_flags_local_workload(r->flags)) {
CALI_INFO("Workload RPF fail: not a local workload.\n");
goto deny;
}
if (r->if_index != skb->ifindex) {
CALI_INFO("Workload RPF fail skb iface (%d) != route iface (%d)\n",
skb->ifindex, r->if_index);
goto deny;
}
// Check whether the workload needs outgoing NAT to this address.
if (r->flags & CALI_RT_NAT_OUT) {
if (!(cali_rt_lookup_flags(state.post_nat_ip_dst) & CALI_RT_IN_POOL)) {
CALI_DEBUG("Source is in NAT-outgoing pool "
"but dest is not, need to SNAT.\n");
state.flags |= CALI_ST_NAT_OUTGOING;
}
}
}
/* icmp_type and icmp_code share storage with the ports; now we've used
* the ports set to 0 to do the conntrack lookup, we can set the ICMP fields
* for policy.
*/
if (state.ip_proto == IPPROTO_ICMP) {
state.icmp_type = icmp_header->type;
state.icmp_code = icmp_header->code;
}
// Set up an entry in the state map and then jump to the normal policy program.
int key = 0;
struct cali_tc_state *map_state = cali_v4_state_lookup_elem(&key);
if (!map_state) {
// Shouldn't be possible; the map is pre-allocated.
CALI_INFO("State map lookup failed: DROP\n");
goto deny;
}
state.pol_rc = CALI_POL_NO_MATCH;
if (nat_dest) {
state.nat_dest.addr = nat_dest->addr;
state.nat_dest.port = nat_dest->port;
} else {
state.nat_dest.addr = 0;
state.nat_dest.port = 0;
}
*map_state = state;
if (CALI_F_HEP) {
/* We don't support host-endpoint policy yet, skip straight to
* the epilogue program.
* FIXME we really want to just call calico_tc_skb_accepted()
* here but that runs out of stack space.
*/
map_state->pol_rc = CALI_POL_ALLOW;
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to epilogue program failed: ALLOW\n");
return TC_ACT_UNSPEC;
}
CALI_DEBUG("About to jump to policy program; lack of further "
"logs means policy dropped the packet...\n");
bpf_tail_call(skb, &cali_jump, 0);
CALI_DEBUG("Tail call to policy program failed: DROP\n");
return TC_ACT_SHOT;
skip_policy:
fwd = calico_tc_skb_accepted(skb, ip_header, &state, nat_dest);
allow:
finalize:
return forward_or_drop(skb, &state, &fwd);
deny:
fwd.res = TC_ACT_SHOT;
goto finalize;
}
__attribute__((section("1/1")))
int calico_tc_skb_accepted_entrypoint(struct __sk_buff *skb)
{
CALI_DEBUG("Entering calico_tc_skb_accepted_entrypoint\n");
struct iphdr *ip_header = NULL;
if (skb_too_short(skb)) {
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__u32 key = 0;
struct cali_tc_state *state = bpf_map_lookup_elem(&cali_v4_state, &key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
struct calico_nat_dest *nat_dest = NULL;
struct calico_nat_dest nat_dest_2 = {
.addr=state->nat_dest.addr,
.port=state->nat_dest.port,
};
if (state->nat_dest.addr != 0) {
nat_dest = &nat_dest_2;
}
struct fwd fwd = calico_tc_skb_accepted(skb, ip_header, state, nat_dest);
return forward_or_drop(skb, state, &fwd);
deny:
return TC_ACT_SHOT;
}
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest)
{
CALI_DEBUG("Entering calico_tc_skb_accepted\n");
enum calico_reason reason = CALI_REASON_UNKNOWN;
int rc = TC_ACT_UNSPEC;
bool fib = false;
struct ct_ctx ct_nat_ctx = {};
int ct_rc = ct_result_rc(state->ct_result.rc);
bool ct_related = ct_result_is_related(state->ct_result.rc);
uint32_t seen_mark;
size_t l4_csum_off = 0, l3_csum_off;
uint32_t fib_flags = 0;
CALI_DEBUG("src=%x dst=%x\n", be32_to_host(state->ip_src), be32_to_host(state->ip_dst));
CALI_DEBUG("post_nat=%x:%d\n", be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
CALI_DEBUG("nat_tun=%x\n", state->nat_tun_src);
CALI_DEBUG("pol_rc=%d\n", state->pol_rc);
CALI_DEBUG("sport=%d\n", state->sport);
CALI_DEBUG("flags=%x\n", state->flags);
CALI_DEBUG("ct_rc=%d\n", ct_rc);
CALI_DEBUG("ct_related=%d\n", ct_related);
// Set the dport to 0, to make sure conntrack entries for icmp is proper as we use
// dport to hold icmp type and code
if (state->ip_proto == IPPROTO_ICMP) {
state->dport = 0;
}
if (CALI_F_FROM_WEP && (state->flags & CALI_ST_NAT_OUTGOING)) {
seen_mark = CALI_SKB_MARK_NAT_OUT;
} else {
/* XXX we do it here again because doing it in one place only
* XXX in calico_tc() irritates the verifier :'(
*/
if (!CALI_F_TO_HOST || !ct_result_rpf_failed(state->ct_result.rc)) {
fib = true;
}
seen_mark = CALI_SKB_MARK_SEEN;
}
/* We check the ttl here to avoid needing complicated handling of
	 * related traffic back from the host if we let the host handle it.
*/
CALI_DEBUG("ip->ttl %d\n", ip_header->ttl);
if (ip_ttl_exceeded(ip_header)) {
switch (ct_rc){
case CALI_CT_NEW:
if (nat_dest) {
goto icmp_ttl_exceeded;
}
break;
case CALI_CT_ESTABLISHED_DNAT:
case CALI_CT_ESTABLISHED_SNAT:
goto icmp_ttl_exceeded;
}
}
l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
if (ct_related) {
if (ip_header->protocol == IPPROTO_ICMP) {
struct icmphdr *icmp;
bool outer_ip_snat;
/* if we do SNAT ... */
outer_ip_snat = ct_rc == CALI_CT_ESTABLISHED_SNAT;
/* ... there is a return path to the tunnel ... */
outer_ip_snat = outer_ip_snat && state->ct_result.tun_ret_ip;
/* ... and should do encap and it is not DSR or it is leaving host
* and either DSR from WEP or originated at host ... */
outer_ip_snat = outer_ip_snat &&
((dnat_return_should_encap() && !CALI_F_DSR) ||
(CALI_F_TO_HEP &&
((CALI_F_DSR && skb_seen(skb)) || !skb_seen(skb))));
/* ... then fix the outer header IP first */
if (outer_ip_snat) {
ip_header->saddr = state->ct_result.nat_ip;
int res = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
CALI_DEBUG("ICMP related: outer IP SNAT to %x\n",
be32_to_host(state->ct_result.nat_ip));
}
if (!icmp_skb_get_hdr(skb, &icmp)) {
CALI_DEBUG("Ooops, we already passed one such a check!!!\n");
goto deny;
}
l3_csum_off += sizeof(*ip_header) + sizeof(*icmp);
ip_header = (struct iphdr *)(icmp + 1); /* skip to inner ip */
/* flip the direction, we need to reverse the original packet */
switch (ct_rc) {
case CALI_CT_ESTABLISHED_SNAT:
/* handle the DSR case, see CALI_CT_ESTABLISHED_SNAT where nat is done */
if (dnat_return_should_encap() && state->ct_result.tun_ret_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
}
ct_rc = CALI_CT_ESTABLISHED_DNAT;
break;
case CALI_CT_ESTABLISHED_DNAT:
if (CALI_F_FROM_HEP && state->nat_tun_src && !state->ct_result.tun_ret_ip) {
/* Packet is returning from a NAT tunnel, just forward it. */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("ICMP related returned from NAT tunnel\n");
goto allow;
}
ct_rc = CALI_CT_ESTABLISHED_SNAT;
break;
}
}
}
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
__u8 ihl = ip_header->ihl * 4;
int res = 0;
bool encap_needed = false;
if (state->ip_proto == IPPROTO_ICMP && ct_related) {
/* do not fix up embedded L4 checksum for related ICMP */
} else {
switch (ip_header->protocol) {
case IPPROTO_TCP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct tcphdr, check);
break;
case IPPROTO_UDP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct udphdr, check);
break;
}
}
switch (ct_rc){
case CALI_CT_NEW:
switch (state->pol_rc) {
case CALI_POL_NO_MATCH:
CALI_DEBUG("Implicitly denied by normal policy: DROP\n");
goto deny;
case CALI_POL_DENY:
CALI_DEBUG("Denied by normal policy: DROP\n");
goto deny;
case CALI_POL_ALLOW:
CALI_DEBUG("Allowed by normal policy: ACCEPT\n");
}
if (CALI_F_FROM_WEP &&
CALI_DROP_WORKLOAD_TO_HOST &&
cali_rt_flags_local_host(
cali_rt_lookup_flags(state->post_nat_ip_dst))) {
CALI_DEBUG("Workload to host traffic blocked by "
"DefaultEndpointToHostAction: DROP\n");
goto deny;
}
ct_nat_ctx.skb = skb;
ct_nat_ctx.proto = state->ip_proto;
ct_nat_ctx.src = state->ip_src;
ct_nat_ctx.sport = state->sport;
ct_nat_ctx.dst = state->post_nat_ip_dst;
ct_nat_ctx.dport = state->post_nat_dport;
ct_nat_ctx.nat_tun_src = state->nat_tun_src;
if (state->flags & CALI_ST_NAT_OUTGOING) {
ct_nat_ctx.flags |= CALI_CT_FLAG_NAT_OUT;
}
if (state->ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_nat_ctx.tcp = tcp_header;
}
// If we get here, we've passed policy.
if (nat_dest == NULL) {
conntrack_create(&ct_nat_ctx, false);
goto allow;
}
ct_nat_ctx.orig_dst = state->ip_dst;
ct_nat_ctx.orig_dport = state->dport;
/* fall through as DNAT is now established */
case CALI_CT_ESTABLISHED_DNAT:
/* align with CALI_CT_NEW */
if (ct_rc == CALI_CT_ESTABLISHED_DNAT) {
if (CALI_F_FROM_HEP && state->nat_tun_src && !state->ct_result.tun_ret_ip) {
/* Packet is returning from a NAT tunnel,
* already SNATed, just forward it.
*/
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("returned from NAT tunnel\n");
goto allow;
}
state->post_nat_ip_dst = state->ct_result.nat_ip;
state->post_nat_dport = state->ct_result.nat_port;
}
CALI_DEBUG("CT: DNAT to %x:%d\n",
be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
struct cali_rt *rt;
encap_needed = dnat_should_encap();
if (encap_needed) {
rt = cali_rt_lookup(state->post_nat_ip_dst);
if (!rt) {
reason = CALI_REASON_RT_UNKNOWN;
goto deny;
}
CALI_DEBUG("rt found for 0x%x\n", be32_to_host(state->post_nat_ip_dst));
encap_needed = !cali_rt_is_local(rt);
/* We cannot enforce RPF check on encapped traffic, do FIB if you can */
fib = true;
}
/* We have not created the conntrack yet since we did not know
* if we need encap or not. Must do before MTU check and before
* we jump to do the encap.
*/
if (ct_rc == CALI_CT_NEW) {
if (CALI_F_DSR && CALI_F_FROM_HEP &&
encap_needed && state->nat_tun_src == 0) {
ct_nat_ctx.flags |= CALI_CT_FLAG_DSR_FWD;
}
conntrack_create(&ct_nat_ctx, true);
}
if (encap_needed) {
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Request packet with DNF set is too big\n");
goto icmp_too_big;
}
state->ip_src = HOST_IP;
state->ip_dst = cali_rt_is_workload(rt) ? rt->next_hop : state->post_nat_ip_dst;
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
goto nat_encap;
}
ip_header->daddr = state->post_nat_ip_dst;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->dest = host_to_be16(state->post_nat_dport);
break;
case IPPROTO_UDP:
udp_header->dest = host_to_be16(state->post_nat_dport);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_dst,
state->post_nat_ip_dst, host_to_be16(state->dport),
host_to_be16(state->post_nat_dport),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
res |= bpf_l3_csum_replace(skb, l3_csum_off, state->ip_dst, state->post_nat_ip_dst, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
/* Handle returning ICMP related to tunnel
*
* N.B. we assume that we can fit in the MTU. Since it is ICMP
* and even though Linux sends up to min ipv4 MTU, it is
		 * unlikely that we are anywhere close to the MTU limit. If we
* are, we need to fail anyway.
*/
if (ct_related && state->ip_proto == IPPROTO_ICMP
&& state->ct_result.tun_ret_ip
&& !CALI_F_DSR) {
if (dnat_return_should_encap()) {
CALI_DEBUG("Returning related ICMP from workload to tunnel\n");
state->ip_dst = state->ct_result.tun_ret_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
} else if (CALI_F_TO_HEP) {
/* Special case for ICMP error being returned by the host with the
* backing workload into the tunnel back to the original host. It is
* ICMP related and there is a return tunnel path. We need to change
* both the source and destination at once.
*
* XXX the packet was routed to the original client as if it was XXX
* DSR and we might not be on the right iface!!! Should we XXX try
* to reinject it to fix the routing?
*/
CALI_DEBUG("Returning related ICMP from host to tunnel\n");
state->ip_src = HOST_IP;
state->ip_dst = state->ct_result.tun_ret_ip;
goto nat_encap;
}
}
state->dport = state->post_nat_dport;
state->ip_dst = state->post_nat_ip_dst;
goto allow;
case CALI_CT_ESTABLISHED_SNAT:
CALI_DEBUG("CT: SNAT from %x:%d\n",
be32_to_host(state->ct_result.nat_ip), state->ct_result.nat_port);
if (dnat_return_should_encap() && state->ct_result.tun_ret_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Return ICMP mtu is too big\n");
goto icmp_too_big;
}
}
// Actually do the NAT.
ip_header->saddr = state->ct_result.nat_ip;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->source = host_to_be16(state->ct_result.nat_port);
break;
case IPPROTO_UDP:
udp_header->source = host_to_be16(state->ct_result.nat_port);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_src,
state->ct_result.nat_ip, host_to_be16(state->sport),
host_to_be16(state->ct_result.nat_port),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
CALI_VERB("L3 checksum update (csum is at %d) port from %x to %x\n",
l3_csum_off, state->ip_src, state->ct_result.nat_ip);
int csum_rc = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
CALI_VERB("bpf_l3_csum_replace(IP): %d\n", csum_rc);
res |= csum_rc;
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
if (dnat_return_should_encap() && state->ct_result.tun_ret_ip) {
state->ip_dst = state->ct_result.tun_ret_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
}
state->sport = state->ct_result.nat_port;
state->ip_src = state->ct_result.nat_ip;
goto allow;
case CALI_CT_ESTABLISHED_BYPASS:
seen_mark = CALI_SKB_MARK_BYPASS;
// fall through
case CALI_CT_ESTABLISHED:
goto allow;
default:
if (CALI_F_FROM_HEP) {
/* Since we're using the host endpoint program for TC-redirect
* acceleration for workloads (but we haven't fully implemented
* host endpoint support yet), we can get an incorrect conntrack
* invalid for host traffic.
*
* FIXME: Properly handle host endpoint conntrack failures
*/
CALI_DEBUG("Traffic is towards host namespace but not conntracked, "
"falling through to iptables\n");
fib = false;
goto allow;
}
goto deny;
}
CALI_INFO("We should never fall through here\n");
goto deny;
icmp_ttl_exceeded:
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
/* we silently drop the packet if things go wrong */
/* XXX we should check if it is broadcast or multicast and not respond */
/* do not respond to IP fragments except the first */
if (ip_frag_no(ip_header)) {
goto deny;
}
if (icmp_v4_ttl_exceeded(skb)) {
goto deny;
}
	/* we need to allow the response for the IP stack to route it back.
* XXX we might want to send it back the same iface
*/
goto icmp_allow;
icmp_too_big:
if (icmp_v4_too_big(skb)) {
reason = CALI_REASON_ICMP_DF;
goto deny;
}
/* XXX we might use skb->ifindex to redirect it straight back
* to where it came from if it is guaranteed to be the path
*/
fib_flags |= BPF_FIB_LOOKUP_OUTPUT;
if (CALI_F_FROM_WEP) {
/* we know it came from workload, just send it back the same way */
rc = CALI_RES_REDIR_IFINDEX;
}
goto icmp_allow;
icmp_allow:
/* recheck the size of the packet after it was turned into icmp and set
	 * state so that it can be processed further.
*/
if (skb_shorter(skb, ETH_IPV4_UDP_SIZE)) {
reason = CALI_REASON_SHORT;
goto deny;
}
ip_header = skb_iphdr(skb);
tc_state_fill_from_iphdr(state, ip_header);
state->sport = state->dport = 0;
/* packet was created because of approved traffic, treat it as related */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
goto allow;
nat_encap:
if (vxlan_v4_encap(skb, state->ip_src, state->ip_dst)) {
reason = CALI_REASON_ENCAP_FAIL;
goto deny;
}
state->sport = state->dport = CALI_VXLAN_PORT;
state->ip_proto = IPPROTO_UDP;
if (CALI_F_INGRESS) {
fib_flags |= BPF_FIB_LOOKUP_OUTPUT;
}
allow:
{
struct fwd fwd = {
.res = rc,
.mark = seen_mark,
};
fwd_fib_set(&fwd, fib);
fwd_fib_set_flags(&fwd, fib_flags);
return fwd;
}
deny:
{
struct fwd fwd = {
.res = TC_ACT_SHOT,
.reason = reason,
};
return fwd;
}
}
#ifndef CALI_ENTRYPOINT_NAME
#define CALI_ENTRYPOINT_NAME calico_entrypoint
#endif
// Entrypoint with definable name. It's useful to redefine the name for each entrypoint
// because the name is exposed by bpftool et al.
__attribute__((section(XSTR(CALI_ENTRYPOINT_NAME))))
int tc_calico_entry(struct __sk_buff *skb)
{
return calico_tc(skb);
}
char ____license[] __attribute__((section("license"), used)) = "GPL";
| 1 | 17,829 | I would say `!= 5`; A packet with <5 would be malformed. | projectcalico-felix | go |
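As a sketch of the reviewer's `!= 5` suggestion for the hunk above (an assumed variant, not the merged code): the stricter comparison rejects malformed headers (ihl < 5) together with packets carrying IP options (ihl > 5). It is written as a drop-in replacement for the added lines and reuses the surrounding context of that hunk (the fwd struct, CALI_DEBUG and the deny label) rather than standing alone.

	/* IHL is a 4-bit count of 32-bit words; 5 (20 bytes) is the only
	 * value for a well-formed, option-less header, so anything else is
	 * either malformed or carries IP options. */
	if (ip_header->ihl != 5) {
		fwd.reason = CALI_REASON_IP_OPTIONS;
		CALI_DEBUG("Drop packet: IHL %d != 5\n", ip_header->ihl);
		goto deny;
	}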
@@ -60,7 +60,7 @@ func New(selector export.AggregationSelector, exporter export.Exporter, opts ...
c.Timeout = c.Period
}
- integrator := simple.New(selector, c.Stateful)
+ integrator := simple.New(selector, exporter)
impl := sdk.NewAccumulator(
integrator,
sdk.WithResource(c.Resource), | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push // import "go.opentelemetry.io/otel/sdk/metric/controller/push"
import (
"context"
"sync"
"time"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/api/metric/registry"
export "go.opentelemetry.io/otel/sdk/export/metric"
sdk "go.opentelemetry.io/otel/sdk/metric"
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
"go.opentelemetry.io/otel/sdk/metric/integrator/simple"
)
// DefaultPushPeriod is the default time interval between pushes.
const DefaultPushPeriod = 10 * time.Second
// Controller organizes a periodic push of metric data.
type Controller struct {
lock sync.Mutex
accumulator *sdk.Accumulator
provider *registry.Provider
integrator *simple.Integrator
exporter export.Exporter
wg sync.WaitGroup
ch chan struct{}
period time.Duration
timeout time.Duration
clock controllerTime.Clock
ticker controllerTime.Ticker
}
// New constructs a Controller, an implementation of metric.Provider,
// using the provided exporter and options to configure an SDK with
// periodic collection.
func New(selector export.AggregationSelector, exporter export.Exporter, opts ...Option) *Controller {
c := &Config{
Period: DefaultPushPeriod,
}
for _, opt := range opts {
opt.Apply(c)
}
if c.Timeout == 0 {
c.Timeout = c.Period
}
integrator := simple.New(selector, c.Stateful)
impl := sdk.NewAccumulator(
integrator,
sdk.WithResource(c.Resource),
)
return &Controller{
provider: registry.NewProvider(impl),
accumulator: impl,
integrator: integrator,
exporter: exporter,
ch: make(chan struct{}),
period: c.Period,
timeout: c.Timeout,
clock: controllerTime.RealClock{},
}
}
// SetClock supports setting a mock clock for testing. This must be
// called before Start().
func (c *Controller) SetClock(clock controllerTime.Clock) {
c.lock.Lock()
defer c.lock.Unlock()
c.clock = clock
}
// Provider returns a metric.Provider instance for this controller.
func (c *Controller) Provider() metric.Provider {
return c.provider
}
// Start begins a ticker that periodically collects and exports
// metrics with the configured interval.
func (c *Controller) Start() {
c.lock.Lock()
defer c.lock.Unlock()
if c.ticker != nil {
return
}
c.ticker = c.clock.Ticker(c.period)
c.wg.Add(1)
go c.run(c.ch)
}
// Stop waits for the background goroutine to return and then collects
// and exports metrics one last time before returning.
func (c *Controller) Stop() {
c.lock.Lock()
defer c.lock.Unlock()
if c.ch == nil {
return
}
close(c.ch)
c.ch = nil
c.wg.Wait()
c.ticker.Stop()
c.tick()
}
func (c *Controller) run(ch chan struct{}) {
for {
select {
case <-ch:
c.wg.Done()
return
case <-c.ticker.C():
c.tick()
}
}
}
func (c *Controller) tick() {
ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel()
c.integrator.Lock()
defer c.integrator.Unlock()
c.accumulator.Collect(ctx)
err := c.exporter.Export(ctx, c.integrator.CheckpointSet())
c.integrator.FinishedCollection()
if err != nil {
global.Handle(err)
}
}
| 1 | 12,659 | I think you missed removing `Stateful` from `push/config.go` | open-telemetry-opentelemetry-go | go |
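A sketch of the follow-up the reviewer is asking for. The real contents of push/config.go are not shown in this row, so the field set below is an assumption based on the options referenced in New (Resource, Period, Timeout); the point is only that the Stateful field, and any WithStateful option, would be deleted now that simple.New takes the exporter.

package push

import (
	"time"

	"go.opentelemetry.io/otel/sdk/resource"
)

// Config after the suggested cleanup: no Stateful field. The integrator
// now derives that behaviour from the exporter handed to simple.New, so
// keeping the knob would silently accept a setting with no effect.
type Config struct {
	// Resource is the OpenTelemetry resource associated with all Meters.
	Resource *resource.Resource
	// Period is the interval between pushes.
	Period time.Duration
	// Timeout is how long a single push is allowed to take.
	Timeout time.Duration
}

// The corresponding WithStateful(bool) Option would be removed as well.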
@@ -29,7 +29,7 @@ namespace Nethermind.Db.Databases
{
}
- protected override void UpdateReadMetrics() => Metrics.EthRequestsDbReads++;
- protected override void UpdateWriteMetrics() => Metrics.EthRequestsDbWrites++;
+ protected internal override void UpdateReadMetrics() => Metrics.EthRequestsDbReads++;
+ protected internal override void UpdateWriteMetrics() => Metrics.EthRequestsDbWrites++;
}
} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Db.Config;
using Nethermind.Logging;
using Nethermind.Store;
namespace Nethermind.Db.Databases
{
public class EthRequestsRocksDb : DbOnTheRocks
{
public override string Name { get; } = "EthRequests";
public EthRequestsRocksDb(string basePath, IDbConfig dbConfig, ILogManager logManager = null)
: base(basePath, "ethRequests", dbConfig, logManager)
{
}
protected override void UpdateReadMetrics() => Metrics.EthRequestsDbReads++;
protected override void UpdateWriteMetrics() => Metrics.EthRequestsDbWrites++;
}
} | 1 | 23,304 | why would you make it internal? if so then at least protected internal | NethermindEth-nethermind | .cs |
@@ -502,6 +502,18 @@ class TestHttps2Http(tservers.ReverseProxTest):
class TestTransparent(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = False
+ def test_tcp_stream_modify(self):
+ self.master.load_script(
+ tutils.test_data.path("scripts/tcp_stream_modify.py"))
+
+ self._tcpproxy_on()
+ d = self.pathod('200:b"foo"')
+ self._tcpproxy_off()
+
+ assert d.content == "bar"
+
+ self.master.unload_scripts()
+
class TestTransparentSSL(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = True | 1 | import os
import socket
import time
from OpenSSL import SSL
from netlib.exceptions import HttpReadDisconnect, HttpException
from netlib.tcp import Address
import netlib.tutils
from netlib import tcp, http, socks
from netlib.certutils import SSLCert
from netlib.http import authentication, CONTENT_MISSING, http1
from netlib.tutils import raises
from libpathod import pathoc, pathod
from libmproxy.proxy.config import HostMatcher
from libmproxy.protocol import Kill
from libmproxy.models import Error, HTTPResponse
import tutils
import tservers
"""
Note that the choice of response code in these tests matters more than you
might think. libcurl treats a 304 response code differently from, say, a
200 response code - it will correctly terminate a 304 response with no
content-length header, whereas it will block forever waiting for content
for a 200 response.
"""
class CommonMixin:
def test_large(self):
assert len(self.pathod("200:b@50k").content) == 1024 * 50
@staticmethod
def wait_until_not_live(flow):
"""
Race condition: We don't want to replay the flow while it is still live.
"""
s = time.time()
while flow.live:
time.sleep(0.001)
if time.time() - s > 5:
raise RuntimeError("Flow is live for too long.")
def test_replay(self):
assert self.pathod("304").status_code == 304
if isinstance(self, tservers.HTTPUpstreamProxTest) and self.ssl:
assert len(self.master.state.view) == 2
else:
assert len(self.master.state.view) == 1
l = self.master.state.view[-1]
assert l.response.status_code == 304
l.request.path = "/p/305"
self.wait_until_not_live(l)
rt = self.master.replay_request(l, block=True)
assert l.response.status_code == 305
# Disconnect error
l.request.path = "/p/305:d0"
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxTest):
assert l.response.status_code == 502
else:
assert l.error
# Port error
l.request.port = 1
# In upstream mode, we get a 502 response from the upstream proxy server.
# In upstream mode with ssl, the replay will fail as we cannot establish
# SSL with the upstream proxy.
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxTest):
assert l.response.status_code == 502
else:
assert l.error
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
# In Upstream mode with SSL, we may already have a previous CONNECT
# request.
l = self.master.state.view[-1]
assert l.client_conn.address
assert "host" in l.request.headers
assert l.response.status_code == 304
def test_invalid_http(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write("invalid\r\n\r\n")
t.wfile.flush()
line = t.rfile.readline()
assert ("Bad Request" in line) or ("Bad Gateway" in line)
def test_sni(self):
if not self.ssl:
return
f = self.pathod("304", sni="testserver.com")
assert f.status_code == 304
log = self.server.last_log()
assert log["request"]["sni"] == "testserver.com"
class TcpMixin:
def _ignore_on(self):
assert not hasattr(self, "_ignore_backup")
self._ignore_backup = self.config.check_ignore
self.config.check_ignore = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_ignore.patterns)
def _ignore_off(self):
assert hasattr(self, "_ignore_backup")
self.config.check_ignore = self._ignore_backup
del self._ignore_backup
def test_ignore(self):
n = self.pathod("304")
self._ignore_on()
i = self.pathod("305")
i2 = self.pathod("306")
self._ignore_off()
self.master.masterq.join()
assert n.status_code == 304
assert i.status_code == 305
assert i2.status_code == 306
assert any(f.response.status_code == 304 for f in self.master.state.flows)
assert not any(f.response.status_code == 305 for f in self.master.state.flows)
assert not any(f.response.status_code == 306 for f in self.master.state.flows)
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert
assert i_cert != n_cert
# Test Non-HTTP traffic
spec = "200:i0,@100:d0" # this results in just 100 random bytes
# mitmproxy responds with bad gateway
assert self.pathod(spec).status_code == 502
self._ignore_on()
with raises(HttpException):
self.pathod(spec) # pathoc tries to parse answer as HTTP
self._ignore_off()
def _tcpproxy_on(self):
assert not hasattr(self, "_tcpproxy_backup")
self._tcpproxy_backup = self.config.check_tcp
self.config.check_tcp = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_tcp.patterns)
def _tcpproxy_off(self):
assert hasattr(self, "_tcpproxy_backup")
self.config.check_tcp = self._tcpproxy_backup
del self._tcpproxy_backup
def test_tcp(self):
n = self.pathod("304")
self._tcpproxy_on()
i = self.pathod("305")
i2 = self.pathod("306")
self._tcpproxy_off()
self.master.masterq.join()
assert n.status_code == 304
assert i.status_code == 305
assert i2.status_code == 306
assert any(f.response.status_code == 304 for f in self.master.state.flows)
assert not any(f.response.status_code == 305 for f in self.master.state.flows)
assert not any(f.response.status_code == 306 for f in self.master.state.flows)
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert == n_cert
# Make sure that TCP messages are in the event log.
assert any("305" in m for m in self.master.log)
assert any("306" in m for m in self.master.log)
class AppMixin:
def test_app(self):
ret = self.app("/")
assert ret.status_code == 200
assert "mitmproxy" in ret.content
class TestHTTP(tservers.HTTPProxTest, CommonMixin, AppMixin):
def test_app_err(self):
p = self.pathoc()
ret = p.request("get:'http://errapp/'")
assert ret.status_code == 500
assert "ValueError" in ret.content
def test_invalid_connect(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write("CONNECT invalid\n\n")
t.wfile.flush()
assert "Bad Request" in t.rfile.readline()
def test_upstream_ssl_error(self):
p = self.pathoc()
ret = p.request("get:'https://localhost:%s/'" % self.server.port)
assert ret.status_code == 400
def test_connection_close(self):
# Add a body, so we have a content-length header, which combined with
# HTTP1.1 means the connection is kept alive.
response = '%s/p/200:b@1' % self.server.urlbase
# Lets sanity check that the connection does indeed stay open by
# issuing two requests over the same connection
p = self.pathoc()
assert p.request("get:'%s'" % response)
assert p.request("get:'%s'" % response)
# Now check that the connection is closed as the client specifies
p = self.pathoc()
assert p.request("get:'%s':h'Connection'='close'" % response)
# There's a race here, which means we can get any of a number of errors.
# Rather than introduce yet another sleep into the test suite, we just
# relax the Exception specification.
with raises(Exception):
p.request("get:'%s'" % response)
def test_reconnect(self):
req = "get:'%s/p/200:b@1:da'" % self.server.urlbase
p = self.pathoc()
assert p.request(req)
# Server has disconnected. Mitmproxy should detect this, and reconnect.
assert p.request(req)
assert p.request(req)
def test_get_connection_switching(self):
def switched(l):
for i in l:
if "serverdisconnect" in i:
return True
req = "get:'%s/p/200:b@1'"
p = self.pathoc()
assert p.request(req % self.server.urlbase)
assert p.request(req % self.server2.urlbase)
assert switched(self.proxy.log)
def test_blank_leading_line(self):
p = self.pathoc()
req = "get:'%s/p/201':i0,'\r\n'"
assert p.request(req % self.server.urlbase).status_code == 201
def test_invalid_headers(self):
p = self.pathoc()
resp = p.request("get:'http://foo':h':foo'='bar'")
assert resp.status_code == 400
def test_stream(self):
self.master.set_stream_large_bodies(1024 * 2)
self.pathod("200:b@1k")
assert not self.master.state.view[-1].response.stream
assert len(self.master.state.view[-1].response.content) == 1024 * 1
self.pathod("200:b@3k")
assert self.master.state.view[-1].response.stream
assert self.master.state.view[-1].response.content == CONTENT_MISSING
self.master.set_stream_large_bodies(None)
def test_stream_modify(self):
self.master.load_script(
tutils.test_data.path("scripts/stream_modify.py"))
d = self.pathod('200:b"foo"')
assert d.content == "bar"
self.master.unload_scripts()
class TestHTTPAuth(tservers.HTTPProxTest):
authenticator = http.authentication.BasicProxyAuth(
http.authentication.PassManSingleUser(
"test",
"test"),
"realm")
def test_auth(self):
assert self.pathod("202").status_code == 407
p = self.pathoc()
ret = p.request("""
get
'http://localhost:%s/p/202'
h'%s'='%s'
""" % (
self.server.port,
http.authentication.BasicProxyAuth.AUTH_HEADER,
authentication.assemble_http_basic_auth("basic", "test", "test")
))
assert ret.status_code == 202
class TestHTTPS(tservers.HTTPProxTest, CommonMixin, TcpMixin):
ssl = True
ssloptions = pathod.SSLOptions(request_client_cert=True)
def test_clientcert_file(self):
try:
self.config.clientcerts = os.path.join(
tutils.test_data.path("data/clientcert"), "client.pem")
f = self.pathod("304")
assert f.status_code == 304
assert self.server.last_log()["request"]["clientcert"]["keyinfo"]
finally:
self.config.clientcerts = None
def test_clientcert_dir(self):
try:
self.config.clientcerts = tutils.test_data.path("data/clientcert")
f = self.pathod("304")
assert f.status_code == 304
assert self.server.last_log()["request"]["clientcert"]["keyinfo"]
finally:
self.config.clientcerts = None
def test_error_post_connect(self):
p = self.pathoc()
assert p.request("get:/:i0,'invalid\r\n\r\n'").status_code == 400
class TestHTTPSCertfile(tservers.HTTPProxTest, CommonMixin):
ssl = True
certfile = True
def test_certfile(self):
assert self.pathod("304")
class TestHTTPSUpstreamServerVerificationWTrustedCert(tservers.HTTPProxTest):
"""
Test upstream server certificate verification with a trusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn="trusted-cert",
certs=[
("trusted-cert", tutils.test_data.path("data/trusted-server.crt"))
])
def test_verification_w_cadir(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_cadir_server = tutils.test_data.path(
"data/trusted-cadir/")
self.pathoc()
def test_verification_w_pemfile(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
self.pathoc()
class TestHTTPSUpstreamServerVerificationWBadCert(tservers.HTTPProxTest):
"""
Test upstream server certificate verification with an untrusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn="untrusted-cert",
certs=[
("untrusted-cert", tutils.test_data.path("data/untrusted-server.crt"))
])
def _request(self):
p = self.pathoc()
# We need to make an actual request because the upstream connection is lazy-loaded.
return p.request("get:/p/242")
def test_default_verification_w_bad_cert(self):
"""Should use no verification."""
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
assert self._request().status_code == 242
def test_no_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_NONE
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
assert self._request().status_code == 242
def test_verification_w_bad_cert(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.openssl_trusted_ca_server = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
assert self._request().status_code == 502
class TestHTTPSNoCommonName(tservers.HTTPProxTest):
"""
Test what happens if we get a cert without common name back.
"""
ssl = True
ssloptions = pathod.SSLOptions(
certs=[
("*", tutils.test_data.path("data/no_common_name.pem"))
]
)
def test_http(self):
f = self.pathod("202")
assert f.sslinfo.certchain[0].get_subject().CN == "127.0.0.1"
class TestReverse(tservers.ReverseProxTest, CommonMixin, TcpMixin):
reverse = True
class TestSocks5(tservers.SocksModeTest):
def test_simple(self):
p = self.pathoc()
p.socks_connect(("localhost", self.server.port))
f = p.request("get:/p/200")
assert f.status_code == 200
def test_with_authentication_only(self):
p = self.pathoc()
f = p.request("get:/p/200")
assert f.status_code == 502
assert "SOCKS5 mode failure" in f.content
def test_no_connect(self):
"""
mitmproxy doesn't support UDP or BIND SOCKS CMDs
"""
p = self.pathoc()
socks.ClientGreeting(
socks.VERSION.SOCKS5,
[socks.METHOD.NO_AUTHENTICATION_REQUIRED]
).to_file(p.wfile)
socks.Message(
socks.VERSION.SOCKS5,
socks.CMD.BIND,
socks.ATYP.DOMAINNAME,
("example.com", 8080)
).to_file(p.wfile)
p.wfile.flush()
p.rfile.read(2) # read server greeting
f = p.request("get:/p/200") # the request doesn't matter, error response from handshake will be read anyway.
assert f.status_code == 502
assert "SOCKS5 mode failure" in f.content
class TestHttps2Http(tservers.ReverseProxTest):
@classmethod
def get_proxy_config(cls):
d = super(TestHttps2Http, cls).get_proxy_config()
d["upstream_server"] = ("http", d["upstream_server"][1])
return d
def pathoc(self, ssl, sni=None):
"""
Returns a connected Pathoc instance.
"""
p = pathoc.Pathoc(
("localhost", self.proxy.port), ssl=True, sni=sni, fp=None
)
p.connect()
return p
def test_all(self):
p = self.pathoc(ssl=True)
assert p.request("get:'/p/200'").status_code == 200
def test_sni(self):
p = self.pathoc(ssl=True, sni="example.com")
assert p.request("get:'/p/200'").status_code == 200
assert all("Error in handle_sni" not in msg for msg in self.proxy.log)
def test_http(self):
p = self.pathoc(ssl=False)
assert p.request("get:'/p/200'").status_code == 200
class TestTransparent(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = False
class TestTransparentSSL(tservers.TransparentProxTest, CommonMixin, TcpMixin):
ssl = True
def test_sslerr(self):
p = pathoc.Pathoc(("localhost", self.proxy.port), fp=None)
p.connect()
r = p.request("get:/")
assert r.status_code == 502
class TestProxy(tservers.HTTPProxTest):
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
f = self.master.state.view[0]
assert f.client_conn.address
assert "host" in f.request.headers
assert f.response.status_code == 304
@tutils.skip_appveyor
def test_response_timestamps(self):
# test that we notice at least 1 sec delay between timestamps
# in response object
f = self.pathod("304:b@1k:p50,1")
assert f.status_code == 304
response = self.master.state.view[0].response
# timestamp_start might fire a bit late, so we play safe and only require 300ms.
assert 0.3 <= response.timestamp_end - response.timestamp_start
@tutils.skip_appveyor
def test_request_timestamps(self):
# test that we notice a delay between timestamps in request object
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
# call pathod server, wait a second to complete the request
connection.send(
"GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n" %
self.server.port)
time.sleep(1)
connection.send("\r\n")
connection.recv(50000)
connection.close()
request, response = self.master.state.view[
0].request, self.master.state.view[0].response
assert response.status_code == 304 # sanity test for our low level request
# timestamp_start might fire a bit late, so we play safe and only require 300ms.
assert 0.3 <= request.timestamp_end - request.timestamp_start
def test_request_tcp_setup_timestamp_presence(self):
# tests that the client_conn a tcp connection has a tcp_setup_timestamp
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("localhost", self.proxy.port))
connection.send(
"GET http://localhost:%d/p/200:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send("\r\n")
# a bit hacky: make sure that we don't just read the headers only.
recvd = 0
while recvd < 1024:
recvd += len(connection.recv(5000))
connection.send(
"GET http://localhost:%d/p/200:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send("\r\n")
recvd = 0
while recvd < 1024:
recvd += len(connection.recv(5000))
connection.close()
first_flow = self.master.state.view[0]
second_flow = self.master.state.view[1]
assert first_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_ssl_setup is None
assert second_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_tcp_setup == second_flow.server_conn.timestamp_tcp_setup
def test_request_ip(self):
f = self.pathod("200:b@100")
assert f.status_code == 200
f = self.master.state.view[0]
assert f.server_conn.address == ("127.0.0.1", self.server.port)
class TestProxySSL(tservers.HTTPProxTest):
ssl = True
def test_request_ssl_setup_timestamp_presence(self):
# tests that the ssl timestamp is present when ssl is used
f = self.pathod("304:b@10k")
assert f.status_code == 304
first_flow = self.master.state.view[0]
assert first_flow.server_conn.timestamp_ssl_setup
class MasterRedirectRequest(tservers.TestMaster):
redirect_port = None # Set by TestRedirectRequest
def handle_request(self, f):
if f.request.path == "/p/201":
# This part should have no impact, but it should also not cause any exceptions.
addr = f.live.server_conn.address
addr2 = Address(("127.0.0.1", self.redirect_port))
f.live.set_server(addr2)
f.live.set_server(addr)
# This is the actual redirection.
f.request.port = self.redirect_port
super(MasterRedirectRequest, self).handle_request(f)
def handle_response(self, f):
f.response.content = str(f.client_conn.address.port)
f.response.headers["server-conn-id"] = str(f.server_conn.source_address.port)
super(MasterRedirectRequest, self).handle_response(f)
class TestRedirectRequest(tservers.HTTPProxTest):
masterclass = MasterRedirectRequest
ssl = True
def test_redirect(self):
"""
Imagine a single HTTPS connection with three requests:
1. First request should pass through unmodified
2. Second request will be redirected to a different host by an inline script
3. Third request should pass through unmodified
This test verifies that the original destination is restored for the third request.
"""
self.master.redirect_port = self.server2.port
p = self.pathoc()
self.server.clear_log()
self.server2.clear_log()
r1 = p.request("get:'/p/200'")
assert r1.status_code == 200
assert self.server.last_log()
assert not self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r2 = p.request("get:'/p/201'")
assert r2.status_code == 201
assert not self.server.last_log()
assert self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r3 = p.request("get:'/p/202'")
assert r3.status_code == 202
assert self.server.last_log()
assert not self.server2.last_log()
assert r1.content == r2.content == r3.content
class MasterStreamRequest(tservers.TestMaster):
"""
Enables the stream flag on the flow for all requests
"""
def handle_responseheaders(self, f):
f.response.stream = True
f.reply()
class TestStreamRequest(tservers.HTTPProxTest):
masterclass = MasterStreamRequest
def test_stream_simple(self):
p = self.pathoc()
# a request with 100k of data but without content-length
r1 = p.request("get:'%s/p/200:r:b@100k:d102400'" % self.server.urlbase)
assert r1.status_code == 200
assert len(r1.content) > 100000
def test_stream_multiple(self):
p = self.pathoc()
# simple request with streaming turned on
r1 = p.request("get:'%s/p/200'" % self.server.urlbase)
assert r1.status_code == 200
# now send back 100k of data, streamed but not chunked
r1 = p.request("get:'%s/p/201:b@100k'" % self.server.urlbase)
assert r1.status_code == 201
def test_stream_chunked(self):
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
fconn = connection.makefile()
spec = '200:h"Transfer-Encoding"="chunked":r:b"4\\r\\nthis\\r\\n11\\r\\nisatest__reachhex\\r\\n0\\r\\n\\r\\n"'
connection.send(
"GET %s/p/%s HTTP/1.1\r\n" %
(self.server.urlbase, spec))
connection.send("\r\n")
resp = http1.read_response_head(fconn)
assert resp.headers["Transfer-Encoding"] == 'chunked'
assert resp.status_code == 200
chunks = list(http1.read_body(fconn, None))
assert chunks == ["this", "isatest__reachhex"]
connection.close()
class MasterFakeResponse(tservers.TestMaster):
def handle_request(self, f):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
f.reply(resp)
class TestFakeResponse(tservers.HTTPProxTest):
masterclass = MasterFakeResponse
def test_fake(self):
f = self.pathod("200")
assert "header-response" in f.headers
class TestServerConnect(tservers.HTTPProxTest):
masterclass = MasterFakeResponse
no_upstream_cert = True
ssl = True
def test_unnecessary_serverconnect(self):
"""A replayed/fake response with no_upstream_cert should not connect to an upstream server"""
assert self.pathod("200").status_code == 200
for msg in self.proxy.tmaster.log:
assert "serverconnect" not in msg
class MasterKillRequest(tservers.TestMaster):
def handle_request(self, f):
f.reply(Kill)
class TestKillRequest(tservers.HTTPProxTest):
masterclass = MasterKillRequest
def test_kill(self):
with raises(HttpReadDisconnect):
self.pathod("200")
# Nothing should have hit the server
assert not self.server.last_log()
class MasterKillResponse(tservers.TestMaster):
def handle_response(self, f):
f.reply(Kill)
class TestKillResponse(tservers.HTTPProxTest):
masterclass = MasterKillResponse
def test_kill(self):
with raises(HttpReadDisconnect):
self.pathod("200")
# The server should have seen a request
assert self.server.last_log()
class EResolver(tservers.TResolver):
def original_addr(self, sock):
raise RuntimeError("Could not resolve original destination.")
class TestTransparentResolveError(tservers.TransparentProxTest):
resolver = EResolver
def test_resolve_error(self):
assert self.pathod("304").status_code == 502
class MasterIncomplete(tservers.TestMaster):
def handle_request(self, f):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.content = CONTENT_MISSING
f.reply(resp)
class TestIncompleteResponse(tservers.HTTPProxTest):
masterclass = MasterIncomplete
def test_incomplete(self):
assert self.pathod("200").status_code == 502
class TestUpstreamProxy(tservers.HTTPUpstreamProxTest, CommonMixin, AppMixin):
ssl = False
def test_order(self):
self.proxy.tmaster.replacehooks.add(
"~q",
"foo",
"bar") # replace in request
self.chain[0].tmaster.replacehooks.add("~q", "bar", "baz")
self.chain[1].tmaster.replacehooks.add("~q", "foo", "oh noes!")
self.chain[0].tmaster.replacehooks.add(
"~s",
"baz",
"ORLY") # replace in response
p = self.pathoc()
req = p.request("get:'%s/p/418:b\"foo\"'" % self.server.urlbase)
assert req.content == "ORLY"
assert req.status_code == 418
class TestUpstreamProxySSL(
tservers.HTTPUpstreamProxTest,
CommonMixin,
TcpMixin):
ssl = True
def _host_pattern_on(self, attr):
"""
Updates config.check_tcp or check_ignore, depending on attr.
"""
assert not hasattr(self, "_ignore_%s_backup" % attr)
backup = []
for proxy in self.chain:
old_matcher = getattr(
proxy.tmaster.server.config,
"check_%s" %
attr)
backup.append(old_matcher)
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
HostMatcher([".+:%s" % self.server.port] + old_matcher.patterns)
)
setattr(self, "_ignore_%s_backup" % attr, backup)
def _host_pattern_off(self, attr):
backup = getattr(self, "_ignore_%s_backup" % attr)
for proxy in reversed(self.chain):
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
backup.pop()
)
assert not backup
delattr(self, "_ignore_%s_backup" % attr)
def _ignore_on(self):
super(TestUpstreamProxySSL, self)._ignore_on()
self._host_pattern_on("ignore")
def _ignore_off(self):
super(TestUpstreamProxySSL, self)._ignore_off()
self._host_pattern_off("ignore")
def _tcpproxy_on(self):
super(TestUpstreamProxySSL, self)._tcpproxy_on()
self._host_pattern_on("tcp")
def _tcpproxy_off(self):
super(TestUpstreamProxySSL, self)._tcpproxy_off()
self._host_pattern_off("tcp")
def test_simple(self):
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert req.content == "content"
assert req.status_code == 418
# CONNECT from pathoc to chain[0],
assert self.proxy.tmaster.state.flow_count() == 2
# request from pathoc to chain[0]
# CONNECT from proxy to chain[1],
assert self.chain[0].tmaster.state.flow_count() == 2
# request from proxy to chain[1]
# request from chain[0] (regular proxy doesn't store CONNECTs)
assert self.chain[1].tmaster.state.flow_count() == 1
class TestProxyChainingSSLReconnect(tservers.HTTPUpstreamProxTest):
ssl = True
def test_reconnect(self):
"""
Tests proper functionality of ConnectionHandler.server_reconnect mock.
If we have a disconnect on a secure connection that's transparently proxified to
an upstream http proxy, we need to send the CONNECT request again.
"""
def kill_requests(master, attr, exclude):
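# Only calls whose 1-based ordinal appears in `exclude` are left alone; every
# other request is killed (client connection closed, flow marked as an error)
# before the original handler runs.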
k = [0] # variable scope workaround: put into array
_func = getattr(master, attr)
def handler(f):
k[0] += 1
if not (k[0] in exclude):
f.client_conn.finish()
f.error = Error("terminated")
f.reply(Kill)
return _func(f)
setattr(master, attr, handler)
kill_requests(self.chain[1].tmaster, "handle_request",
exclude=[
# fail first request
2, # allow second request
])
kill_requests(self.chain[0].tmaster, "handle_request",
exclude=[
1, # CONNECT
# fail first request
3, # reCONNECT
4, # request
])
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert req.content == "content"
assert req.status_code == 418
assert self.proxy.tmaster.state.flow_count() == 2 # CONNECT and request
# CONNECT, failing request,
assert self.chain[0].tmaster.state.flow_count() == 4
# reCONNECT, request
# failing request, request
assert self.chain[1].tmaster.state.flow_count() == 2
# (doesn't store (repeated) CONNECTs from chain[0]
# as it is a regular proxy)
assert not self.chain[1].tmaster.state.flows[0].response # killed
assert self.chain[1].tmaster.state.flows[1].response
assert self.proxy.tmaster.state.flows[0].request.form_in == "authority"
assert self.proxy.tmaster.state.flows[1].request.form_in == "relative"
assert self.chain[0].tmaster.state.flows[
0].request.form_in == "authority"
assert self.chain[0].tmaster.state.flows[
1].request.form_in == "relative"
assert self.chain[0].tmaster.state.flows[
2].request.form_in == "authority"
assert self.chain[0].tmaster.state.flows[
3].request.form_in == "relative"
assert self.chain[1].tmaster.state.flows[
0].request.form_in == "relative"
assert self.chain[1].tmaster.state.flows[
1].request.form_in == "relative"
req = p.request("get:'/p/418:b\"content2\"'")
assert req.status_code == 502
assert self.proxy.tmaster.state.flow_count() == 3 # + new request
# + new request, repeated CONNECT from chain[1]
assert self.chain[0].tmaster.state.flow_count() == 6
# (both terminated)
# nothing happened here
assert self.chain[1].tmaster.state.flow_count() == 2
| 1 | 10,941 | We should check if the response (`d`) contains bar as response, screw the log. :smile: | mitmproxy-mitmproxy | py |
@@ -538,7 +538,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
}
// data transfer time
std::chrono::duration<double, std::milli> end_time = std::chrono::steady_clock::now() - start_time;
- Log::Info("%d dense feature groups (%.2f MB) transfered to GPU in %f secs. %d sparse feature groups",
+ Log::Info("%d dense feature groups (%.2f MB) transferred to GPU in %f secs. %d sparse feature groups",
dense_feature_group_map_.size(), ((dense_feature_group_map_.size() + (dword_features_ - 1)) / dword_features_) * num_data_ * sizeof(Feature4) / (1024.0 * 1024.0),
end_time * 1e-3, sparse_feature_group_map_.size());
#if GPU_DEBUG >= 1 | 1 | #ifdef USE_GPU
#include "gpu_tree_learner.h"
#include "../io/dense_bin.hpp"
#include "../io/dense_nbits_bin.hpp"
#include <LightGBM/utils/array_args.h>
#include <LightGBM/network.h>
#include <LightGBM/bin.h>
#include <algorithm>
#include <vector>
#define GPU_DEBUG 0
namespace LightGBM {
GPUTreeLearner::GPUTreeLearner(const Config* config)
:SerialTreeLearner(config) {
use_bagging_ = false;
Log::Info("This is the GPU trainer!!");
}
GPUTreeLearner::~GPUTreeLearner() {
if (ptr_pinned_gradients_) {
queue_.enqueue_unmap_buffer(pinned_gradients_, ptr_pinned_gradients_);
}
if (ptr_pinned_hessians_) {
queue_.enqueue_unmap_buffer(pinned_hessians_, ptr_pinned_hessians_);
}
if (ptr_pinned_feature_masks_) {
queue_.enqueue_unmap_buffer(pinned_feature_masks_, ptr_pinned_feature_masks_);
}
}
void GPUTreeLearner::Init(const Dataset* train_data, bool is_constant_hessian) {
// initialize SerialTreeLearner
SerialTreeLearner::Init(train_data, is_constant_hessian);
// some additional variables needed for GPU trainer
num_feature_groups_ = train_data_->num_feature_groups();
// Initialize GPU buffers and kernels
InitGPU(config_->gpu_platform_id, config_->gpu_device_id);
}
// some functions used for debugging the GPU histogram construction
#if GPU_DEBUG > 0
void PrintHistograms(HistogramBinEntry* h, size_t size) {
size_t total = 0;
for (size_t i = 0; i < size; ++i) {
printf("%03lu=%9.3g,%9.3g,%7d\t", i, h[i].sum_gradients, h[i].sum_hessians, h[i].cnt);
total += h[i].cnt;
if ((i & 3) == 3)
printf("\n");
}
printf("\nTotal examples: %lu\n", total);
}
union Float_t
{
int64_t i;
double f;
static int64_t ulp_diff(Float_t a, Float_t b) {
return abs(a.i - b.i);
}
};
void CompareHistograms(HistogramBinEntry* h1, HistogramBinEntry* h2, size_t size, int feature_id) {
size_t i;
Float_t a, b;
for (i = 0; i < size; ++i) {
a.f = h1[i].sum_gradients;
b.f = h2[i].sum_gradients;
int32_t ulps = Float_t::ulp_diff(a, b);
if (h1[i].cnt != h2[i].cnt) {
printf("%d != %d\n", h1[i].cnt, h2[i].cnt);
goto err;
}
if (ulps > 0) {
// printf("grad %g != %g (%d ULPs)\n", h1[i].sum_gradients, h2[i].sum_gradients, ulps);
// goto err;
}
a.f = h1[i].sum_hessians;
b.f = h2[i].sum_hessians;
ulps = Float_t::ulp_diff(a, b);
if (ulps > 0) {
// printf("hessian %g != %g (%d ULPs)\n", h1[i].sum_hessians, h2[i].sum_hessians, ulps);
// goto err;
}
}
return;
err:
Log::Warning("Mismatched histograms found for feature %d at location %lu.", feature_id, i);
std::cin.get();
PrintHistograms(h1, size);
printf("\n");
PrintHistograms(h2, size);
std::cin.get();
}
#endif
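// Returns log2 of the number of workgroups assigned to each feature4 tuple:
// start from roughly 256 workgroups across all tuples, shrink it for small
// leaves, and clamp the exponent to [0, kMaxLogWorkgroupsPerFeature].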
int GPUTreeLearner::GetNumWorkgroupsPerFeature(data_size_t leaf_num_data) {
// we roughly want 256 workgroups per device, and we have num_dense_feature4_ feature tuples.
// also guarantee that there are at least 2K examples per workgroup
double x = 256.0 / num_dense_feature4_;
int exp_workgroups_per_feature = (int)ceil(log2(x));
double t = leaf_num_data / 1024.0;
#if GPU_DEBUG >= 4
printf("Computing histogram for %d examples and (%d * %d) feature groups\n", leaf_num_data, dword_features_, num_dense_feature4_);
printf("We can have at most %d workgroups per feature4 for efficiency reasons.\n"
"Best workgroup size per feature for full utilization is %d\n", (int)ceil(t), (1 << exp_workgroups_per_feature));
#endif
exp_workgroups_per_feature = std::min(exp_workgroups_per_feature, (int)ceil(log((double)t)/log(2.0)));
if (exp_workgroups_per_feature < 0)
exp_workgroups_per_feature = 0;
if (exp_workgroups_per_feature > kMaxLogWorkgroupsPerFeature)
exp_workgroups_per_feature = kMaxLogWorkgroupsPerFeature;
// return 0;
return exp_workgroups_per_feature;
}
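// Launches the histogram kernel for one leaf (picking the variant for full
// data, all features, or masked features) and asynchronously maps the output
// buffer; the result is consumed later by WaitAndGetHistograms().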
void GPUTreeLearner::GPUHistogram(data_size_t leaf_num_data, bool use_all_features) {
// we have already copied ordered gradients, ordered hessians and indices to GPU
// decide the best number of workgroups working on one feature4 tuple
// set work group size based on feature size
// each 2^exp_workgroups_per_feature workgroups work on a feature4 tuple
int exp_workgroups_per_feature = GetNumWorkgroupsPerFeature(leaf_num_data);
int num_workgroups = (1 << exp_workgroups_per_feature) * num_dense_feature4_;
if (num_workgroups > preallocd_max_num_wg_) {
preallocd_max_num_wg_ = num_workgroups;
Log::Info("Increasing preallocd_max_num_wg_ to %d for launching more workgroups", preallocd_max_num_wg_);
device_subhistograms_.reset(new boost::compute::vector<char>(
preallocd_max_num_wg_ * dword_features_ * device_bin_size_ * hist_bin_entry_sz_, ctx_));
// we need to refresh the kernel arguments after reallocating
for (int i = 0; i <= kMaxLogWorkgroupsPerFeature; ++i) {
// The only argument that needs to be changed later is num_data_
histogram_kernels_[i].set_arg(7, *device_subhistograms_);
histogram_allfeats_kernels_[i].set_arg(7, *device_subhistograms_);
histogram_fulldata_kernels_[i].set_arg(7, *device_subhistograms_);
}
}
#if GPU_DEBUG >= 4
printf("Setting exp_workgroups_per_feature to %d, using %u work groups\n", exp_workgroups_per_feature, num_workgroups);
printf("Constructing histogram with %d examples\n", leaf_num_data);
#endif
// the GPU kernel will process all features in one call, and each
// 2^exp_workgroups_per_feature (compile time constant) workgroup will
// process one feature4 tuple
if (use_all_features) {
histogram_allfeats_kernels_[exp_workgroups_per_feature].set_arg(4, leaf_num_data);
}
else {
histogram_kernels_[exp_workgroups_per_feature].set_arg(4, leaf_num_data);
}
// for the root node, indices are not copied
if (leaf_num_data != num_data_) {
indices_future_.wait();
}
// for constant hessian, hessians are not copied except for the root node
if (!is_constant_hessian_) {
hessians_future_.wait();
}
gradients_future_.wait();
// there will be 2^exp_workgroups_per_feature = num_workgroups / num_dense_feature4 sub-histogram per feature4
// and we will launch num_feature workgroups for this kernel
// will launch threads for all features
// the queue should be asynchronous, and we will call WaitAndGetHistograms() before we start processing dense feature groups
if (leaf_num_data == num_data_) {
kernel_wait_obj_ = boost::compute::wait_list(queue_.enqueue_1d_range_kernel(histogram_fulldata_kernels_[exp_workgroups_per_feature], 0, num_workgroups * 256, 256));
}
else {
if (use_all_features) {
kernel_wait_obj_ = boost::compute::wait_list(
queue_.enqueue_1d_range_kernel(histogram_allfeats_kernels_[exp_workgroups_per_feature], 0, num_workgroups * 256, 256));
}
else {
kernel_wait_obj_ = boost::compute::wait_list(
queue_.enqueue_1d_range_kernel(histogram_kernels_[exp_workgroups_per_feature], 0, num_workgroups * 256, 256));
}
}
// copy the results asynchronously. Size depends on if double precision is used
size_t output_size = num_dense_feature4_ * dword_features_ * device_bin_size_ * hist_bin_entry_sz_;
boost::compute::event histogram_wait_event;
host_histogram_outputs_ = (void*)queue_.enqueue_map_buffer_async(device_histogram_outputs_, boost::compute::command_queue::map_read,
0, output_size, histogram_wait_event, kernel_wait_obj_);
// we will wait for this object in WaitAndGetHistograms
histograms_wait_obj_ = boost::compute::wait_list(histogram_wait_event);
}
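// Blocks until the mapped histogram output is ready, then converts it into
// LightGBM's HistogramBinEntry layout, summing the extra sub-bins created by
// device_bin_mults_ for redistributed features.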
template <typename HistType>
void GPUTreeLearner::WaitAndGetHistograms(HistogramBinEntry* histograms) {
HistType* hist_outputs = (HistType*) host_histogram_outputs_;
// when the output is ready, the computation is done
histograms_wait_obj_.wait();
#pragma omp parallel for schedule(static)
for(int i = 0; i < num_dense_feature_groups_; ++i) {
if (!feature_masks_[i]) {
continue;
}
int dense_group_index = dense_feature_group_map_[i];
auto old_histogram_array = histograms + train_data_->GroupBinBoundary(dense_group_index);
int bin_size = train_data_->FeatureGroupNumBin(dense_group_index);
if (device_bin_mults_[i] == 1) {
for (int j = 0; j < bin_size; ++j) {
old_histogram_array[j].sum_gradients = hist_outputs[i * device_bin_size_+ j].sum_gradients;
old_histogram_array[j].sum_hessians = hist_outputs[i * device_bin_size_ + j].sum_hessians;
old_histogram_array[j].cnt = (data_size_t)hist_outputs[i * device_bin_size_ + j].cnt;
}
}
else {
// values of this feature have been redistributed to multiple bins; need a reduction here
int ind = 0;
for (int j = 0; j < bin_size; ++j) {
double sum_g = 0.0, sum_h = 0.0;
size_t cnt = 0;
for (int k = 0; k < device_bin_mults_[i]; ++k) {
sum_g += hist_outputs[i * device_bin_size_+ ind].sum_gradients;
sum_h += hist_outputs[i * device_bin_size_+ ind].sum_hessians;
cnt += hist_outputs[i * device_bin_size_ + ind].cnt;
ind++;
}
old_histogram_array[j].sum_gradients = sum_g;
old_histogram_array[j].sum_hessians = sum_h;
old_histogram_array[j].cnt = (data_size_t)cnt;
}
}
}
queue_.enqueue_unmap_buffer(device_histogram_outputs_, host_histogram_outputs_);
}
void GPUTreeLearner::AllocateGPUMemory() {
num_dense_feature_groups_ = 0;
for (int i = 0; i < num_feature_groups_; ++i) {
if (ordered_bins_[i] == nullptr) {
num_dense_feature_groups_++;
}
}
// how many feature-group tuples we have
num_dense_feature4_ = (num_dense_feature_groups_ + (dword_features_ - 1)) / dword_features_;
// leave some safe margin for prefetching
// 256 work-items per workgroup. Each work-item prefetches one tuple for that feature
int allocated_num_data_ = num_data_ + 256 * (1 << kMaxLogWorkgroupsPerFeature);
// clear sparse/dense maps
dense_feature_group_map_.clear();
device_bin_mults_.clear();
sparse_feature_group_map_.clear();
// do nothing if no features can be processed on GPU
if (!num_dense_feature_groups_) {
Log::Warning("GPU acceleration is disabled because no non-trivial dense features can be found");
return;
}
// allocate memory for all features (FIXME: 4 GB barrier on some devices, need to split to multiple buffers)
device_features_.reset();
device_features_ = std::unique_ptr<boost::compute::vector<Feature4>>(new boost::compute::vector<Feature4>(num_dense_feature4_ * num_data_, ctx_));
// unpin old buffer if necessary before destructing them
if (ptr_pinned_gradients_) {
queue_.enqueue_unmap_buffer(pinned_gradients_, ptr_pinned_gradients_);
}
if (ptr_pinned_hessians_) {
queue_.enqueue_unmap_buffer(pinned_hessians_, ptr_pinned_hessians_);
}
if (ptr_pinned_feature_masks_) {
queue_.enqueue_unmap_buffer(pinned_feature_masks_, ptr_pinned_feature_masks_);
}
// make ordered_gradients and hessians larger (including extra room for prefetching), and pin them
ordered_gradients_.reserve(allocated_num_data_);
ordered_hessians_.reserve(allocated_num_data_);
pinned_gradients_ = boost::compute::buffer(); // deallocate
pinned_gradients_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr,
ordered_gradients_.data());
ptr_pinned_gradients_ = queue_.enqueue_map_buffer(pinned_gradients_, boost::compute::command_queue::map_write_invalidate_region,
0, allocated_num_data_ * sizeof(score_t));
pinned_hessians_ = boost::compute::buffer(); // deallocate
pinned_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr,
ordered_hessians_.data());
ptr_pinned_hessians_ = queue_.enqueue_map_buffer(pinned_hessians_, boost::compute::command_queue::map_write_invalidate_region,
0, allocated_num_data_ * sizeof(score_t));
// allocate space for gradients and hessians on device
// we will copy gradients and hessians in after ordered_gradients_ and ordered_hessians_ are constructed
device_gradients_ = boost::compute::buffer(); // deallocate
device_gradients_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_only, nullptr);
device_hessians_ = boost::compute::buffer(); // deallocate
device_hessians_ = boost::compute::buffer(ctx_, allocated_num_data_ * sizeof(score_t),
boost::compute::memory_object::read_only, nullptr);
// allocate feature mask, for disabling some feature-groups' histogram calculation
feature_masks_.resize(num_dense_feature4_ * dword_features_);
device_feature_masks_ = boost::compute::buffer(); // deallocate
device_feature_masks_ = boost::compute::buffer(ctx_, num_dense_feature4_ * dword_features_,
boost::compute::memory_object::read_only, nullptr);
pinned_feature_masks_ = boost::compute::buffer(ctx_, num_dense_feature4_ * dword_features_,
boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr,
feature_masks_.data());
ptr_pinned_feature_masks_ = queue_.enqueue_map_buffer(pinned_feature_masks_, boost::compute::command_queue::map_write_invalidate_region,
0, num_dense_feature4_ * dword_features_);
memset(ptr_pinned_feature_masks_, 0, num_dense_feature4_ * dword_features_);
// copy indices to the device
device_data_indices_.reset();
device_data_indices_ = std::unique_ptr<boost::compute::vector<data_size_t>>(new boost::compute::vector<data_size_t>(allocated_num_data_, ctx_));
boost::compute::fill(device_data_indices_->begin(), device_data_indices_->end(), 0, queue_);
// histogram bin entry size depends on the precision (single/double)
hist_bin_entry_sz_ = config_->gpu_use_dp ? sizeof(HistogramBinEntry) : sizeof(GPUHistogramBinEntry);
Log::Info("Size of histogram bin entry: %d", hist_bin_entry_sz_);
// create output buffer, each feature has a histogram with device_bin_size_ bins,
// each work group generates a sub-histogram of dword_features_ features.
if (!device_subhistograms_) {
// only initialize once here, as this will not need to change when ResetTrainingData() is called
device_subhistograms_ = std::unique_ptr<boost::compute::vector<char>>(new boost::compute::vector<char>(
preallocd_max_num_wg_ * dword_features_ * device_bin_size_ * hist_bin_entry_sz_, ctx_));
}
// create atomic counters for inter-group coordination
sync_counters_.reset();
sync_counters_ = std::unique_ptr<boost::compute::vector<int>>(new boost::compute::vector<int>(
num_dense_feature4_, ctx_));
boost::compute::fill(sync_counters_->begin(), sync_counters_->end(), 0, queue_);
// The output buffer is allocated to host directly, to overlap compute and data transfer
device_histogram_outputs_ = boost::compute::buffer(); // deallocate
device_histogram_outputs_ = boost::compute::buffer(ctx_, num_dense_feature4_ * dword_features_ * device_bin_size_ * hist_bin_entry_sz_,
boost::compute::memory_object::write_only | boost::compute::memory_object::alloc_host_ptr, nullptr);
// find the dense feature-groups and group them into Feature4 data structure (several feature-groups packed into 4 bytes)
int k = 0, copied_feature4 = 0;
std::vector<int> dense_dword_ind(dword_features_);
for (int i = 0; i < num_feature_groups_; ++i) {
// looking for dword_features_ non-sparse feature-groups
if (ordered_bins_[i] == nullptr) {
dense_dword_ind[k] = i;
// decide if we need to redistribute the bin
double t = device_bin_size_ / (double)train_data_->FeatureGroupNumBin(i);
// multiplier must be a power of 2
device_bin_mults_.push_back((int)round(pow(2, floor(log2(t)))));
// device_bin_mults_.push_back(1);
#if GPU_DEBUG >= 1
printf("feature-group %d using multiplier %d\n", i, device_bin_mults_.back());
#endif
k++;
}
else {
sparse_feature_group_map_.push_back(i);
}
// found
if (k == dword_features_) {
k = 0;
for (int j = 0; j < dword_features_; ++j) {
dense_feature_group_map_.push_back(dense_dword_ind[j]);
}
copied_feature4++;
}
}
// for data transfer time
auto start_time = std::chrono::steady_clock::now();
// Now generate new data structure feature4, and copy data to the device
int nthreads = std::min(omp_get_max_threads(), (int)dense_feature_group_map_.size() / dword_features_);
nthreads = std::max(nthreads, 1);
std::vector<Feature4*> host4_vecs(nthreads);
std::vector<boost::compute::buffer> host4_bufs(nthreads);
std::vector<Feature4*> host4_ptrs(nthreads);
// preallocate arrays for all threads, and pin them
for (int i = 0; i < nthreads; ++i) {
host4_vecs[i] = (Feature4*)boost::alignment::aligned_alloc(4096, num_data_ * sizeof(Feature4));
host4_bufs[i] = boost::compute::buffer(ctx_, num_data_ * sizeof(Feature4),
boost::compute::memory_object::read_write | boost::compute::memory_object::use_host_ptr,
host4_vecs[i]);
host4_ptrs[i] = (Feature4*)queue_.enqueue_map_buffer(host4_bufs[i], boost::compute::command_queue::map_write_invalidate_region,
0, num_data_ * sizeof(Feature4));
}
// building Feature4 bundles; each thread handles dword_features_ features
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)(dense_feature_group_map_.size() / dword_features_); ++i) {
int tid = omp_get_thread_num();
Feature4* host4 = host4_ptrs[tid];
auto dense_ind = dense_feature_group_map_.begin() + i * dword_features_;
auto dev_bin_mult = device_bin_mults_.begin() + i * dword_features_;
#if GPU_DEBUG >= 1
printf("Copying feature group ");
for (int l = 0; l < dword_features_; ++l) {
printf("%d ", dense_ind[l]);
}
printf("to devices\n");
#endif
if (dword_features_ == 8) {
// one feature datapoint is 4 bits
BinIterator* bin_iters[8];
for (int s_idx = 0; s_idx < 8; ++s_idx) {
bin_iters[s_idx] = train_data_->FeatureGroupIterator(dense_ind[s_idx]);
if (dynamic_cast<Dense4bitsBinIterator*>(bin_iters[s_idx]) == 0) {
Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not", dense_ind[s_idx]);
}
}
// this guarantees that the RawGet() function is inlined, rather than using virtual function dispatching
Dense4bitsBinIterator iters[8] = {
*static_cast<Dense4bitsBinIterator*>(bin_iters[0]),
*static_cast<Dense4bitsBinIterator*>(bin_iters[1]),
*static_cast<Dense4bitsBinIterator*>(bin_iters[2]),
*static_cast<Dense4bitsBinIterator*>(bin_iters[3]),
*static_cast<Dense4bitsBinIterator*>(bin_iters[4]),
*static_cast<Dense4bitsBinIterator*>(bin_iters[5]),
*static_cast<Dense4bitsBinIterator*>(bin_iters[6]),
*static_cast<Dense4bitsBinIterator*>(bin_iters[7])};
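// Pack eight 4-bit bin values into the four bytes of a Feature4: each value is
// scaled by its power-of-two device_bin_mults_ entry and the low bits of the
// row index select one of the duplicated sub-bins, so consecutive rows spread
// across sub-bins (WaitAndGetHistograms() sums them back together).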
for (int j = 0; j < num_data_; ++j) {
host4[j].s[0] = (uint8_t)((iters[0].RawGet(j) * dev_bin_mult[0] + ((j+0) & (dev_bin_mult[0] - 1)))
|((iters[1].RawGet(j) * dev_bin_mult[1] + ((j+1) & (dev_bin_mult[1] - 1))) << 4));
host4[j].s[1] = (uint8_t)((iters[2].RawGet(j) * dev_bin_mult[2] + ((j+2) & (dev_bin_mult[2] - 1)))
|((iters[3].RawGet(j) * dev_bin_mult[3] + ((j+3) & (dev_bin_mult[3] - 1))) << 4));
host4[j].s[2] = (uint8_t)((iters[4].RawGet(j) * dev_bin_mult[4] + ((j+4) & (dev_bin_mult[4] - 1)))
|((iters[5].RawGet(j) * dev_bin_mult[5] + ((j+5) & (dev_bin_mult[5] - 1))) << 4));
host4[j].s[3] = (uint8_t)((iters[6].RawGet(j) * dev_bin_mult[6] + ((j+6) & (dev_bin_mult[6] - 1)))
|((iters[7].RawGet(j) * dev_bin_mult[7] + ((j+7) & (dev_bin_mult[7] - 1))) << 4));
}
}
else if (dword_features_ == 4) {
// one feature datapoint is one byte
for (int s_idx = 0; s_idx < 4; ++s_idx) {
BinIterator* bin_iter = train_data_->FeatureGroupIterator(dense_ind[s_idx]);
// this guarantees that the RawGet() function is inlined, rather than using virtual function dispatching
if (dynamic_cast<DenseBinIterator<uint8_t>*>(bin_iter) != 0) {
// Dense bin
DenseBinIterator<uint8_t> iter = *static_cast<DenseBinIterator<uint8_t>*>(bin_iter);
for (int j = 0; j < num_data_; ++j) {
host4[j].s[s_idx] = (uint8_t)(iter.RawGet(j) * dev_bin_mult[s_idx] + ((j+s_idx) & (dev_bin_mult[s_idx] - 1)));
}
}
else if (dynamic_cast<Dense4bitsBinIterator*>(bin_iter) != 0) {
// Dense 4-bit bin
Dense4bitsBinIterator iter = *static_cast<Dense4bitsBinIterator*>(bin_iter);
for (int j = 0; j < num_data_; ++j) {
host4[j].s[s_idx] = (uint8_t)(iter.RawGet(j) * dev_bin_mult[s_idx] + ((j+s_idx) & (dev_bin_mult[s_idx] - 1)));
}
}
else {
Log::Fatal("Bug in GPU tree builder: only DenseBin and Dense4bitsBin are supported");
}
}
}
else {
Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8");
}
queue_.enqueue_write_buffer(device_features_->get_buffer(),
i * num_data_ * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
#if GPU_DEBUG >= 1
printf("first example of feature-group tuple is: %d %d %d %d\n", host4[0].s0, host4[0].s1, host4[0].s2, host4[0].s3);
printf("Feature-groups copied to device with multipliers ");
for (int l = 0; l < dword_features_; ++l) {
printf("%d ", dev_bin_mult[l]);
}
printf("\n");
#endif
}
// working on the remaining (less than dword_features_) feature groups
if (k != 0) {
Feature4* host4 = host4_ptrs[0];
if (dword_features_ == 8) {
memset(host4, 0, num_data_ * sizeof(Feature4));
}
#if GPU_DEBUG >= 1
printf("%d features left\n", k);
#endif
for (int i = 0; i < k; ++i) {
if (dword_features_ == 8) {
BinIterator* bin_iter = train_data_->FeatureGroupIterator(dense_dword_ind[i]);
if (dynamic_cast<Dense4bitsBinIterator*>(bin_iter) != 0) {
Dense4bitsBinIterator iter = *static_cast<Dense4bitsBinIterator*>(bin_iter);
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_; ++j) {
host4[j].s[i >> 1] |= (uint8_t)((iter.RawGet(j) * device_bin_mults_[copied_feature4 * dword_features_ + i]
+ ((j+i) & (device_bin_mults_[copied_feature4 * dword_features_ + i] - 1)))
<< ((i & 1) << 2));
}
}
else {
Log::Fatal("GPU tree learner assumes that all bins are Dense4bitsBin when num_bin <= 16, but feature %d is not", dense_dword_ind[i]);
}
}
else if (dword_features_ == 4) {
BinIterator* bin_iter = train_data_->FeatureGroupIterator(dense_dword_ind[i]);
if (dynamic_cast<DenseBinIterator<uint8_t>*>(bin_iter) != 0) {
DenseBinIterator<uint8_t> iter = *static_cast<DenseBinIterator<uint8_t>*>(bin_iter);
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_; ++j) {
host4[j].s[i] = (uint8_t)(iter.RawGet(j) * device_bin_mults_[copied_feature4 * dword_features_ + i]
+ ((j+i) & (device_bin_mults_[copied_feature4 * dword_features_ + i] - 1)));
}
}
else if (dynamic_cast<Dense4bitsBinIterator*>(bin_iter) != 0) {
Dense4bitsBinIterator iter = *static_cast<Dense4bitsBinIterator*>(bin_iter);
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_; ++j) {
host4[j].s[i] = (uint8_t)(iter.RawGet(j) * device_bin_mults_[copied_feature4 * dword_features_ + i]
+ ((j+i) & (device_bin_mults_[copied_feature4 * dword_features_ + i] - 1)));
}
}
else {
Log::Fatal("BUG in GPU tree builder: only DenseBin and Dense4bitsBin are supported");
}
}
else {
Log::Fatal("Bug in GPU tree builder: dword_features_ can only be 4 or 8");
}
}
// fill the leftover features
if (dword_features_ == 8) {
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_; ++j) {
for (int i = k; i < dword_features_; ++i) {
// fill this empty feature with some "random" value
host4[j].s[i >> 1] |= (uint8_t)((j & 0xf) << ((i & 1) << 2));
}
}
}
else if (dword_features_ == 4) {
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_; ++j) {
for (int i = k; i < dword_features_; ++i) {
// fill this empty feature with some "random" value
host4[j].s[i] = (uint8_t)j;
}
}
}
// copying the last 1 to (dword_features - 1) feature-groups in the last tuple
queue_.enqueue_write_buffer(device_features_->get_buffer(),
(num_dense_feature4_ - 1) * num_data_ * sizeof(Feature4), num_data_ * sizeof(Feature4), host4);
#if GPU_DEBUG >= 1
printf("Last features copied to device\n");
#endif
for (int i = 0; i < k; ++i) {
dense_feature_group_map_.push_back(dense_dword_ind[i]);
}
}
// deallocate pinned space for feature copying
for (int i = 0; i < nthreads; ++i) {
queue_.enqueue_unmap_buffer(host4_bufs[i], host4_ptrs[i]);
host4_bufs[i] = boost::compute::buffer();
boost::alignment::aligned_free(host4_vecs[i]);
}
// data transfer time
std::chrono::duration<double, std::milli> end_time = std::chrono::steady_clock::now() - start_time;
Log::Info("%d dense feature groups (%.2f MB) transfered to GPU in %f secs. %d sparse feature groups",
dense_feature_group_map_.size(), ((dense_feature_group_map_.size() + (dword_features_ - 1)) / dword_features_) * num_data_ * sizeof(Feature4) / (1024.0 * 1024.0),
end_time * 1e-3, sparse_feature_group_map_.size());
#if GPU_DEBUG >= 1
printf("Dense feature group list (size %lu): ", dense_feature_group_map_.size());
for (int i = 0; i < num_dense_feature_groups_; ++i) {
printf("%d ", dense_feature_group_map_[i]);
}
printf("\n");
printf("Sparse feature group list (size %lu): ", sparse_feature_group_map_.size());
for (int i = 0; i < num_feature_groups_ - num_dense_feature_groups_; ++i) {
printf("%d ", sparse_feature_group_map_[i]);
}
printf("\n");
#endif
}
std::string GPUTreeLearner::GetBuildLog(const std::string &opts) {
boost::compute::program program = boost::compute::program::create_with_source(kernel_source_, ctx_);
try {
program.build(opts);
}
catch (boost::compute::opencl_error &e) {
auto error_code = e.error_code();
std::string log("No log available.\n");
// for other types of failure, build log might not be available; program.build_log() can crash
if (error_code == CL_INVALID_PROGRAM || error_code == CL_BUILD_PROGRAM_FAILURE) {
try {
log = program.build_log();
}
catch(...) {
// Something bad happened. Just return "No log available."
}
}
return log;
}
// build is okay, log may contain warnings
return program.build_log();
}
void GPUTreeLearner::BuildGPUKernels() {
Log::Info("Compiling OpenCL Kernel with %d bins...", device_bin_size_);
// destroy any old kernels
histogram_kernels_.clear();
histogram_allfeats_kernels_.clear();
histogram_fulldata_kernels_.clear();
// create OpenCL kernels for different number of workgroups per feature
histogram_kernels_.resize(kMaxLogWorkgroupsPerFeature+1);
histogram_allfeats_kernels_.resize(kMaxLogWorkgroupsPerFeature+1);
histogram_fulldata_kernels_.resize(kMaxLogWorkgroupsPerFeature+1);
// currently we don't use constant memory
int use_constants = 0;
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i <= kMaxLogWorkgroupsPerFeature; ++i) {
OMP_LOOP_EX_BEGIN();
boost::compute::program program;
std::ostringstream opts;
// compile the GPU kernel depending if double precision is used, constant hessian is used, etc
opts << " -D POWER_FEATURE_WORKGROUPS=" << i
<< " -D USE_CONSTANT_BUF=" << use_constants << " -D USE_DP_FLOAT=" << int(config_->gpu_use_dp)
<< " -D CONST_HESSIAN=" << int(is_constant_hessian_)
<< " -cl-mad-enable -cl-no-signed-zeros -cl-fast-relaxed-math";
#if GPU_DEBUG >= 1
std::cout << "Building GPU kernels with options: " << opts.str() << std::endl;
#endif
// kernel with indices in an array
try {
program = boost::compute::program::build_with_source(kernel_source_, ctx_, opts.str());
}
catch (boost::compute::opencl_error &e) {
#pragma omp critical
{
std::cerr << "Build Options:" << opts.str() << std::endl;
std::cerr << "Build Log:" << std::endl << GetBuildLog(opts.str()) << std::endl;
Log::Fatal("Cannot build GPU program: %s", e.what());
}
}
histogram_kernels_[i] = program.create_kernel(kernel_name_);
// kernel with all features enabled, with eliminated branches
opts << " -D ENABLE_ALL_FEATURES=1";
try {
program = boost::compute::program::build_with_source(kernel_source_, ctx_, opts.str());
}
catch (boost::compute::opencl_error &e) {
#pragma omp critical
{
std::cerr << "Build Options:" << opts.str() << std::endl;
std::cerr << "Build Log:" << std::endl << GetBuildLog(opts.str()) << std::endl;
Log::Fatal("Cannot build GPU program: %s", e.what());
}
}
histogram_allfeats_kernels_[i] = program.create_kernel(kernel_name_);
// kernel with all data indices (for root node, and assumes that root node always uses all features)
opts << " -D IGNORE_INDICES=1";
try {
program = boost::compute::program::build_with_source(kernel_source_, ctx_, opts.str());
}
catch (boost::compute::opencl_error &e) {
#pragma omp critical
{
std::cerr << "Build Options:" << opts.str() << std::endl;
std::cerr << "Build Log:" << std::endl << GetBuildLog(opts.str()) << std::endl;
Log::Fatal("Cannot build GPU program: %s", e.what());
}
}
histogram_fulldata_kernels_[i] = program.create_kernel(kernel_name_);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
Log::Info("GPU programs have been built");
}
void GPUTreeLearner::SetupKernelArguments() {
// do nothing if no features can be processed on GPU
if (!num_dense_feature_groups_) {
return;
}
for (int i = 0; i <= kMaxLogWorkgroupsPerFeature; ++i) {
// The only argument that needs to be changed later is num_data_
if (is_constant_hessian_) {
// hessian is passed as a parameter, but it is not available now.
// hessian will be set in BeforeTrain()
histogram_kernels_[i].set_args(*device_features_, device_feature_masks_, num_data_,
*device_data_indices_, num_data_, device_gradients_, 0.0f,
*device_subhistograms_, *sync_counters_, device_histogram_outputs_);
histogram_allfeats_kernels_[i].set_args(*device_features_, device_feature_masks_, num_data_,
*device_data_indices_, num_data_, device_gradients_, 0.0f,
*device_subhistograms_, *sync_counters_, device_histogram_outputs_);
histogram_fulldata_kernels_[i].set_args(*device_features_, device_feature_masks_, num_data_,
*device_data_indices_, num_data_, device_gradients_, 0.0f,
*device_subhistograms_, *sync_counters_, device_histogram_outputs_);
}
else {
histogram_kernels_[i].set_args(*device_features_, device_feature_masks_, num_data_,
*device_data_indices_, num_data_, device_gradients_, device_hessians_,
*device_subhistograms_, *sync_counters_, device_histogram_outputs_);
histogram_allfeats_kernels_[i].set_args(*device_features_, device_feature_masks_, num_data_,
*device_data_indices_, num_data_, device_gradients_, device_hessians_,
*device_subhistograms_, *sync_counters_, device_histogram_outputs_);
histogram_fulldata_kernels_[i].set_args(*device_features_, device_feature_masks_, num_data_,
*device_data_indices_, num_data_, device_gradients_, device_hessians_,
*device_subhistograms_, *sync_counters_, device_histogram_outputs_);
}
}
}
void GPUTreeLearner::InitGPU(int platform_id, int device_id) {
// Get the max bin size, used for selecting best GPU kernel
max_num_bin_ = 0;
#if GPU_DEBUG >= 1
printf("bin size: ");
#endif
for (int i = 0; i < num_feature_groups_; ++i) {
#if GPU_DEBUG >= 1
printf("%d, ", train_data_->FeatureGroupNumBin(i));
#endif
max_num_bin_ = std::max(max_num_bin_, train_data_->FeatureGroupNumBin(i));
}
#if GPU_DEBUG >= 1
printf("\n");
#endif
// initialize GPU
dev_ = boost::compute::system::default_device();
if (platform_id >= 0 && device_id >= 0) {
const std::vector<boost::compute::platform> platforms = boost::compute::system::platforms();
if ((int)platforms.size() > platform_id) {
const std::vector<boost::compute::device> platform_devices = platforms[platform_id].devices();
if ((int)platform_devices.size() > device_id) {
Log::Info("Using requested OpenCL platform %d device %d", platform_id, device_id);
dev_ = platform_devices[device_id];
}
}
}
// determine which kernel to use based on the max number of bins
if (max_num_bin_ <= 16) {
kernel_source_ = kernel16_src_;
kernel_name_ = "histogram16";
device_bin_size_ = 16;
dword_features_ = 8;
}
else if (max_num_bin_ <= 64) {
kernel_source_ = kernel64_src_;
kernel_name_ = "histogram64";
device_bin_size_ = 64;
dword_features_ = 4;
}
else if ( max_num_bin_ <= 256) {
kernel_source_ = kernel256_src_;
kernel_name_ = "histogram256";
device_bin_size_ = 256;
dword_features_ = 4;
}
else {
Log::Fatal("bin size %d cannot run on GPU", max_num_bin_);
}
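  // The warnings below fire when the bin count only slightly exceeds a kernel
  // boundary (16 or 64), which forces the next, slower kernel to be selected.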
  if (max_num_bin_ == 65) {
    Log::Warning("Setting max_bin to 63 is suggested for best performance");
  }
  if (max_num_bin_ == 17) {
    Log::Warning("Setting max_bin to 15 is suggested for best performance");
  }
ctx_ = boost::compute::context(dev_);
queue_ = boost::compute::command_queue(ctx_, dev_);
Log::Info("Using GPU Device: %s, Vendor: %s", dev_.name().c_str(), dev_.vendor().c_str());
BuildGPUKernels();
AllocateGPUMemory();
  // set up GPU kernel arguments after allocating all the buffers
SetupKernelArguments();
}
Tree* GPUTreeLearner::Train(const score_t* gradients, const score_t *hessians,
bool is_constant_hessian, Json& forced_split_json) {
// check if we need to recompile the GPU kernel (is_constant_hessian changed)
// this should rarely occur
if (is_constant_hessian != is_constant_hessian_) {
Log::Info("Recompiling GPU kernel because hessian is %sa constant now", is_constant_hessian ? "" : "not ");
is_constant_hessian_ = is_constant_hessian;
BuildGPUKernels();
SetupKernelArguments();
}
return SerialTreeLearner::Train(gradients, hessians, is_constant_hessian, forced_split_json);
}
void GPUTreeLearner::ResetTrainingData(const Dataset* train_data) {
SerialTreeLearner::ResetTrainingData(train_data);
num_feature_groups_ = train_data_->num_feature_groups();
  // GPU memory has to be reallocated because the data may have changed
AllocateGPUMemory();
  // set up GPU kernel arguments after allocating all the buffers
SetupKernelArguments();
}
void GPUTreeLearner::BeforeTrain() {
#if GPU_DEBUG >= 2
  printf("Copying initial full gradients and hessians to device\n");
#endif
// Copy initial full hessians and gradients to GPU.
// We start copying as early as possible, instead of at ConstructHistogram().
if (!use_bagging_ && num_dense_feature_groups_) {
if (!is_constant_hessian_) {
hessians_future_ = queue_.enqueue_write_buffer_async(device_hessians_, 0, num_data_ * sizeof(score_t), hessians_);
}
else {
// setup hessian parameters only
score_t const_hessian = hessians_[0];
for (int i = 0; i <= kMaxLogWorkgroupsPerFeature; ++i) {
// hessian is passed as a parameter
histogram_kernels_[i].set_arg(6, const_hessian);
histogram_allfeats_kernels_[i].set_arg(6, const_hessian);
histogram_fulldata_kernels_[i].set_arg(6, const_hessian);
}
}
gradients_future_ = queue_.enqueue_write_buffer_async(device_gradients_, 0, num_data_ * sizeof(score_t), gradients_);
}
SerialTreeLearner::BeforeTrain();
// use bagging
if (data_partition_->leaf_count(0) != num_data_ && num_dense_feature_groups_) {
// On GPU, we start copying indices, gradients and hessians now, instead at ConstructHistogram()
// copy used gradients and hessians to ordered buffer
const data_size_t* indices = data_partition_->indices();
data_size_t cnt = data_partition_->leaf_count(0);
#if GPU_DEBUG > 0
printf("Using bagging, examples count = %d\n", cnt);
#endif
// transfer the indices to GPU
indices_future_ = boost::compute::copy_async(indices, indices + cnt, device_data_indices_->begin(), queue_);
if (!is_constant_hessian_) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < cnt; ++i) {
ordered_hessians_[i] = hessians_[indices[i]];
}
// transfer hessian to GPU
hessians_future_ = queue_.enqueue_write_buffer_async(device_hessians_, 0, cnt * sizeof(score_t), ordered_hessians_.data());
}
else {
// setup hessian parameters only
score_t const_hessian = hessians_[indices[0]];
for (int i = 0; i <= kMaxLogWorkgroupsPerFeature; ++i) {
// hessian is passed as a parameter
histogram_kernels_[i].set_arg(6, const_hessian);
histogram_allfeats_kernels_[i].set_arg(6, const_hessian);
histogram_fulldata_kernels_[i].set_arg(6, const_hessian);
}
}
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < cnt; ++i) {
ordered_gradients_[i] = gradients_[indices[i]];
}
// transfer gradients to GPU
gradients_future_ = queue_.enqueue_write_buffer_async(device_gradients_, 0, cnt * sizeof(score_t), ordered_gradients_.data());
}
}
bool GPUTreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) {
int smaller_leaf;
data_size_t num_data_in_left_child = GetGlobalDataCountInLeaf(left_leaf);
data_size_t num_data_in_right_child = GetGlobalDataCountInLeaf(right_leaf);
// only have root
if (right_leaf < 0) {
smaller_leaf = -1;
} else if (num_data_in_left_child < num_data_in_right_child) {
smaller_leaf = left_leaf;
} else {
smaller_leaf = right_leaf;
}
// Copy indices, gradients and hessians as early as possible
if (smaller_leaf >= 0 && num_dense_feature_groups_) {
// only need to initialize for smaller leaf
// Get leaf boundary
const data_size_t* indices = data_partition_->indices();
data_size_t begin = data_partition_->leaf_begin(smaller_leaf);
data_size_t end = begin + data_partition_->leaf_count(smaller_leaf);
// copy indices to the GPU:
#if GPU_DEBUG >= 2
Log::Info("Copying indices, gradients and hessians to GPU...");
    printf("Indices size %d being copied (left = %d, right = %d)\n", end - begin, num_data_in_left_child, num_data_in_right_child);
#endif
indices_future_ = boost::compute::copy_async(indices + begin, indices + end, device_data_indices_->begin(), queue_);
if (!is_constant_hessian_) {
#pragma omp parallel for schedule(static)
for (data_size_t i = begin; i < end; ++i) {
ordered_hessians_[i - begin] = hessians_[indices[i]];
}
// copy ordered hessians to the GPU:
hessians_future_ = queue_.enqueue_write_buffer_async(device_hessians_, 0, (end - begin) * sizeof(score_t), ptr_pinned_hessians_);
}
#pragma omp parallel for schedule(static)
for (data_size_t i = begin; i < end; ++i) {
ordered_gradients_[i - begin] = gradients_[indices[i]];
}
// copy ordered gradients to the GPU:
gradients_future_ = queue_.enqueue_write_buffer_async(device_gradients_, 0, (end - begin) * sizeof(score_t), ptr_pinned_gradients_);
#if GPU_DEBUG >= 2
Log::Info("Gradients/hessians/indices copied to device with size %d", end - begin);
#endif
}
return SerialTreeLearner::BeforeFindBestSplit(tree, left_leaf, right_leaf);
}
bool GPUTreeLearner::ConstructGPUHistogramsAsync(
const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians) {
if (num_data <= 0) {
return false;
}
// do nothing if no features can be processed on GPU
if (!num_dense_feature_groups_) {
return false;
}
// copy data indices if it is not null
if (data_indices != nullptr && num_data != num_data_) {
indices_future_ = boost::compute::copy_async(data_indices, data_indices + num_data, device_data_indices_->begin(), queue_);
}
// generate and copy ordered_gradients if gradients is not null
if (gradients != nullptr) {
if (num_data != num_data_) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
ordered_gradients[i] = gradients[data_indices[i]];
}
gradients_future_ = queue_.enqueue_write_buffer_async(device_gradients_, 0, num_data * sizeof(score_t), ptr_pinned_gradients_);
}
else {
gradients_future_ = queue_.enqueue_write_buffer_async(device_gradients_, 0, num_data * sizeof(score_t), gradients);
}
}
// generate and copy ordered_hessians if hessians is not null
if (hessians != nullptr && !is_constant_hessian_) {
if (num_data != num_data_) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
ordered_hessians[i] = hessians[data_indices[i]];
}
hessians_future_ = queue_.enqueue_write_buffer_async(device_hessians_, 0, num_data * sizeof(score_t), ptr_pinned_hessians_);
}
else {
hessians_future_ = queue_.enqueue_write_buffer_async(device_hessians_, 0, num_data * sizeof(score_t), hessians);
}
}
  // convert indices in is_feature_used to feature-group indices
std::vector<int8_t> is_feature_group_used(num_feature_groups_, 0);
#pragma omp parallel for schedule(static,1024) if (num_features_ >= 2048)
for (int i = 0; i < num_features_; ++i) {
if(is_feature_used[i]) {
is_feature_group_used[train_data_->Feature2Group(i)] = 1;
}
}
// construct the feature masks for dense feature-groups
int used_dense_feature_groups = 0;
#pragma omp parallel for schedule(static,1024) reduction(+:used_dense_feature_groups) if (num_dense_feature_groups_ >= 2048)
for (int i = 0; i < num_dense_feature_groups_; ++i) {
if (is_feature_group_used[dense_feature_group_map_[i]]) {
feature_masks_[i] = 1;
++used_dense_feature_groups;
}
else {
feature_masks_[i] = 0;
}
}
bool use_all_features = used_dense_feature_groups == num_dense_feature_groups_;
// if no feature group is used, just return and do not use GPU
if (used_dense_feature_groups == 0) {
return false;
}
#if GPU_DEBUG >= 1
printf("Feature masks:\n");
for (unsigned int i = 0; i < feature_masks_.size(); ++i) {
printf("%d ", feature_masks_[i]);
}
printf("\n");
printf("%d feature groups, %d used, %d\n", num_dense_feature_groups_, used_dense_feature_groups, use_all_features);
#endif
// if not all feature groups are used, we need to transfer the feature mask to GPU
// otherwise, we will use a specialized GPU kernel with all feature groups enabled
if (!use_all_features) {
queue_.enqueue_write_buffer(device_feature_masks_, 0, num_dense_feature4_ * dword_features_, ptr_pinned_feature_masks_);
}
// All data have been prepared, now run the GPU kernel
GPUHistogram(num_data, use_all_features);
return true;
}
void GPUTreeLearner::ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract) {
std::vector<int8_t> is_sparse_feature_used(num_features_, 0);
std::vector<int8_t> is_dense_feature_used(num_features_, 0);
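  // Split the used features into sparse ones (handled on the CPU via ordered_bins_)
  // and dense ones (handled by the GPU histogram kernels).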
#pragma omp parallel for schedule(static)
for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
if (!is_feature_used_[feature_index]) continue;
if (!is_feature_used[feature_index]) continue;
if (ordered_bins_[train_data_->Feature2Group(feature_index)]) {
is_sparse_feature_used[feature_index] = 1;
}
else {
is_dense_feature_used[feature_index] = 1;
}
}
// construct smaller leaf
HistogramBinEntry* ptr_smaller_leaf_hist_data = smaller_leaf_histogram_array_[0].RawData() - 1;
  // ConstructGPUHistogramsAsync will return true if there are available feature groups dispatched to the GPU
bool is_gpu_used = ConstructGPUHistogramsAsync(is_feature_used,
nullptr, smaller_leaf_splits_->num_data_in_leaf(),
nullptr, nullptr,
nullptr, nullptr);
// then construct sparse features on CPU
// We set data_indices to null to avoid rebuilding ordered gradients/hessians
train_data_->ConstructHistograms(is_sparse_feature_used,
nullptr, smaller_leaf_splits_->num_data_in_leaf(),
smaller_leaf_splits_->LeafIndex(),
ordered_bins_, gradients_, hessians_,
ordered_gradients_.data(), ordered_hessians_.data(), is_constant_hessian_,
ptr_smaller_leaf_hist_data);
// wait for GPU to finish, only if GPU is actually used
if (is_gpu_used) {
if (config_->gpu_use_dp) {
// use double precision
WaitAndGetHistograms<HistogramBinEntry>(ptr_smaller_leaf_hist_data);
}
else {
// use single precision
WaitAndGetHistograms<GPUHistogramBinEntry>(ptr_smaller_leaf_hist_data);
}
}
  // Compare the GPU histogram with the CPU histogram, useful for debugging GPU code problems
// #define GPU_DEBUG_COMPARE
#ifdef GPU_DEBUG_COMPARE
for (int i = 0; i < num_dense_feature_groups_; ++i) {
if (!feature_masks_[i])
continue;
int dense_feature_group_index = dense_feature_group_map_[i];
size_t size = train_data_->FeatureGroupNumBin(dense_feature_group_index);
HistogramBinEntry* ptr_smaller_leaf_hist_data = smaller_leaf_histogram_array_[0].RawData() - 1;
HistogramBinEntry* current_histogram = ptr_smaller_leaf_hist_data + train_data_->GroupBinBoundary(dense_feature_group_index);
HistogramBinEntry* gpu_histogram = new HistogramBinEntry[size];
data_size_t num_data = smaller_leaf_splits_->num_data_in_leaf();
printf("Comparing histogram for feature %d size %d, %lu bins\n", dense_feature_group_index, num_data, size);
std::copy(current_histogram, current_histogram + size, gpu_histogram);
std::memset(current_histogram, 0, train_data_->FeatureGroupNumBin(dense_feature_group_index) * sizeof(HistogramBinEntry));
train_data_->FeatureGroupBin(dense_feature_group_index)->ConstructHistogram(
num_data != num_data_ ? smaller_leaf_splits_->data_indices() : nullptr,
num_data,
num_data != num_data_ ? ordered_gradients_.data() : gradients_,
num_data != num_data_ ? ordered_hessians_.data() : hessians_,
current_histogram);
CompareHistograms(gpu_histogram, current_histogram, size, dense_feature_group_index);
std::copy(gpu_histogram, gpu_histogram + size, current_histogram);
delete [] gpu_histogram;
}
#endif
if (larger_leaf_histogram_array_ != nullptr && !use_subtract) {
// construct larger leaf
HistogramBinEntry* ptr_larger_leaf_hist_data = larger_leaf_histogram_array_[0].RawData() - 1;
is_gpu_used = ConstructGPUHistogramsAsync(is_feature_used,
larger_leaf_splits_->data_indices(), larger_leaf_splits_->num_data_in_leaf(),
gradients_, hessians_,
ordered_gradients_.data(), ordered_hessians_.data());
// then construct sparse features on CPU
// We set data_indices to null to avoid rebuilding ordered gradients/hessians
train_data_->ConstructHistograms(is_sparse_feature_used,
nullptr, larger_leaf_splits_->num_data_in_leaf(),
larger_leaf_splits_->LeafIndex(),
ordered_bins_, gradients_, hessians_,
ordered_gradients_.data(), ordered_hessians_.data(), is_constant_hessian_,
ptr_larger_leaf_hist_data);
// wait for GPU to finish, only if GPU is actually used
if (is_gpu_used) {
if (config_->gpu_use_dp) {
// use double precision
WaitAndGetHistograms<HistogramBinEntry>(ptr_larger_leaf_hist_data);
}
else {
// use single precision
WaitAndGetHistograms<GPUHistogramBinEntry>(ptr_larger_leaf_hist_data);
}
}
}
}
void GPUTreeLearner::FindBestSplits() {
SerialTreeLearner::FindBestSplits();
#if GPU_DEBUG >= 3
for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
if (!is_feature_used_[feature_index]) continue;
if (parent_leaf_histogram_array_ != nullptr
&& !parent_leaf_histogram_array_[feature_index].is_splittable()) {
smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
continue;
}
size_t bin_size = train_data_->FeatureNumBin(feature_index) + 1;
printf("Feature %d smaller leaf:\n", feature_index);
PrintHistograms(smaller_leaf_histogram_array_[feature_index].RawData() - 1, bin_size);
if (larger_leaf_splits_ == nullptr || larger_leaf_splits_->LeafIndex() < 0) { continue; }
printf("Feature %d larger leaf:\n", feature_index);
PrintHistograms(larger_leaf_histogram_array_[feature_index].RawData() - 1, bin_size);
}
#endif
}
void GPUTreeLearner::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) {
const SplitInfo& best_split_info = best_split_per_leaf_[best_Leaf];
#if GPU_DEBUG >= 2
printf("Splitting leaf %d with feature %d thresh %d gain %f stat %f %f %f %f\n", best_Leaf, best_split_info.feature, best_split_info.threshold, best_split_info.gain, best_split_info.left_sum_gradient, best_split_info.right_sum_gradient, best_split_info.left_sum_hessian, best_split_info.right_sum_hessian);
#endif
SerialTreeLearner::Split(tree, best_Leaf, left_leaf, right_leaf);
if (Network::num_machines() == 1) {
// do some sanity check for the GPU algorithm
if (best_split_info.left_count < best_split_info.right_count) {
if ((best_split_info.left_count != smaller_leaf_splits_->num_data_in_leaf()) ||
(best_split_info.right_count!= larger_leaf_splits_->num_data_in_leaf())) {
Log::Fatal("Bug in GPU histogram! split %d: %d, smaller_leaf: %d, larger_leaf: %d\n", best_split_info.left_count, best_split_info.right_count, smaller_leaf_splits_->num_data_in_leaf(), larger_leaf_splits_->num_data_in_leaf());
}
} else {
double smaller_min = smaller_leaf_splits_->min_constraint();
double smaller_max = smaller_leaf_splits_->max_constraint();
double larger_min = larger_leaf_splits_->min_constraint();
double larger_max = larger_leaf_splits_->max_constraint();
smaller_leaf_splits_->Init(*right_leaf, data_partition_.get(), best_split_info.right_sum_gradient, best_split_info.right_sum_hessian);
larger_leaf_splits_->Init(*left_leaf, data_partition_.get(), best_split_info.left_sum_gradient, best_split_info.left_sum_hessian);
smaller_leaf_splits_->SetValueConstraint(smaller_min, smaller_max);
larger_leaf_splits_->SetValueConstraint(larger_min, larger_max);
if ((best_split_info.left_count != larger_leaf_splits_->num_data_in_leaf()) ||
(best_split_info.right_count!= smaller_leaf_splits_->num_data_in_leaf())) {
Log::Fatal("Bug in GPU histogram! split %d: %d, smaller_leaf: %d, larger_leaf: %d\n", best_split_info.left_count, best_split_info.right_count, smaller_leaf_splits_->num_data_in_leaf(), larger_leaf_splits_->num_data_in_leaf());
}
}
}
}
} // namespace LightGBM
#endif // USE_GPU
| 1 | 19,627 | @Laurae2 good call. This is the only one I found (with `git grep transfered`) | microsoft-LightGBM | cpp |
@@ -1805,7 +1805,7 @@ class VariablesChecker(BaseChecker):
"""
if (
node.frame().parent == defstmt
- and node.statement(future=True) not in node.frame().body
+ and node.statement(future=True) == node.frame()
):
# Check if used as type annotation
# Break but don't emit message if postponed evaluation is enabled | 1 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016, 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Grant Welch <[email protected]>
# Copyright (c) 2017-2018, 2021 Ville Skyttä <[email protected]>
# Copyright (c) 2017-2018, 2020 hippo91 <[email protected]>
# Copyright (c) 2017 Dan Garrette <[email protected]>
# Copyright (c) 2018-2019 Jim Robertson <[email protected]>
# Copyright (c) 2018 Mike Miller <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Drew <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 Marianna Polatoglou <[email protected]>
# Copyright (c) 2018 mar-chi-pan <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019, 2021 Nick Drozd <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2020 Ashley Whetter <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Tushar Sadhwani <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 bot <[email protected]>
# Copyright (c) 2021 David Liu <[email protected]>
# Copyright (c) 2021 kasium <[email protected]>
# Copyright (c) 2021 Marcin Kurczewski <[email protected]>
# Copyright (c) 2021 Sergei Lebedev <[email protected]>
# Copyright (c) 2021 Lorena B <[email protected]>
# Copyright (c) 2021 haasea <[email protected]>
# Copyright (c) 2021 Alexander Kapshuna <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""variables checkers for Python code
"""
import collections
import copy
import itertools
import os
import re
import sys
from enum import Enum
from functools import lru_cache
from typing import Any, DefaultDict, List, Optional, Set, Tuple, Union
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker, utils
from pylint.checkers.utils import is_postponed_evaluation_enabled
from pylint.constants import PY39_PLUS
from pylint.interfaces import HIGH, INFERENCE, INFERENCE_FAILURE, IAstroidChecker
from pylint.utils import get_global_option
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = "__future__"
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile("_.*|^ignored_|^unused_")
# In Python 3.7 abc has a Python implementation which is preferred
# by astroid. Unfortunately this also messes up our explicit checks
# for `abc`
METACLASS_NAME_TRANSFORMS = {"_py_abc": "abc"}
TYPING_TYPE_CHECKS_GUARDS = frozenset({"typing.TYPE_CHECKING", "TYPE_CHECKING"})
BUILTIN_RANGE = "builtins.range"
TYPING_MODULE = "typing"
TYPING_NAMES = frozenset(
{
"Any",
"Callable",
"ClassVar",
"Generic",
"Optional",
"Tuple",
"Type",
"TypeVar",
"Union",
"AbstractSet",
"ByteString",
"Container",
"ContextManager",
"Hashable",
"ItemsView",
"Iterable",
"Iterator",
"KeysView",
"Mapping",
"MappingView",
"MutableMapping",
"MutableSequence",
"MutableSet",
"Sequence",
"Sized",
"ValuesView",
"Awaitable",
"AsyncIterator",
"AsyncIterable",
"Coroutine",
"Collection",
"AsyncGenerator",
"AsyncContextManager",
"Reversible",
"SupportsAbs",
"SupportsBytes",
"SupportsComplex",
"SupportsFloat",
"SupportsInt",
"SupportsRound",
"Counter",
"Deque",
"Dict",
"DefaultDict",
"List",
"Set",
"FrozenSet",
"NamedTuple",
"Generator",
"AnyStr",
"Text",
"Pattern",
"BinaryIO",
}
)
class VariableVisitConsumerAction(Enum):
"""Used after _visit_consumer to determine the action to be taken
Continue -> continue loop to next consumer
Return -> return and thereby break the loop
Consume -> consume the found nodes (second return value) and return
"""
CONTINUE = 0
RETURN = 1
CONSUME = 2
def _is_from_future_import(stmt, name):
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingException:
return None
for local_node in module.locals.get(name, []):
if isinstance(local_node, nodes.ImportFrom) and local_node.modname == FUTURE:
return True
return None
def in_for_else_branch(parent, stmt):
    """Returns True if stmt is inside the else branch of a parent For stmt."""
return isinstance(parent, nodes.For) and any(
else_stmt.parent_of(stmt) or else_stmt == stmt for else_stmt in parent.orelse
)
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
"""get overridden method if any"""
try:
parent = next(klass.local_attr_ancestors(name))
except (StopIteration, KeyError):
return None
try:
meth_node = parent[name]
except KeyError:
# We have found an ancestor defining <name> but it's not in the local
# dictionary. This may happen with astroid built from living objects.
return None
if isinstance(meth_node, nodes.FunctionDef):
return meth_node
return None
def _get_unpacking_extra_info(node, inferred):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ""
inferred_module = inferred.root().name
if node.root().name == inferred_module:
if node.lineno == inferred.lineno:
more = f" {inferred.as_string()}"
elif inferred.lineno:
more = f" defined at line {inferred.lineno}"
elif inferred.lineno:
more = f" defined at line {inferred.lineno} of {inferred_module}"
return more
def _detect_global_scope(node, frame, defframe):
    """Detect that the given frames share a global scope.
    Two frames share a global scope when neither of them, nor any of their
    parent scopes up to the root scope, is hidden under a function scope.
    In this case, depending on something defined later on will not work,
    because it is still undefined.
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if isinstance(frame, nodes.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope
# (defined in, which doesn't concern us) or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if not isinstance(node.parent, (nodes.FunctionDef, nodes.Arguments)):
return False
elif any(
not isinstance(f, (nodes.ClassDef, nodes.Module)) for f in (frame, defframe)
):
# Not interested in other frames, since they are already
# not in a global scope.
return False
break_scopes = []
for current_scope in (scope, def_scope):
        # Look for parent scopes. If there is anything other
        # than a module or a class scope, then the frames don't
        # share a global scope.
parent_scope = current_scope
while parent_scope:
if not isinstance(parent_scope, (nodes.ClassDef, nodes.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if break_scopes and len(set(break_scopes)) != 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) share the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
# At this point, we are certain that frame and defframe shares a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno
def _infer_name_module(node, name):
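    """Infer the value(s) bound to ``name`` in the context of the given import node."""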
context = astroid.context.InferenceContext()
context.lookupname = name
return node.infer(context, asname=False)
def _fix_dot_imports(not_consumed):
    """Try to fix imports with multiple dots by expanding the import names.
    The function unflattens root imports, like 'xml' (when we have both
    'xml.etree' and 'xml.sax'), to 'xml.etree' and 'xml.sax' respectively,
    and returns (name, import statement) pairs sorted by line number.
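    For example, a ``not_consumed`` entry ``{"xml": [<Import xml.etree>, <Import xml.sax>]}``
    yields ``[("xml.etree", <Import ...>), ("xml.sax", <Import ...>)]``.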
"""
names = {}
for name, stmts in not_consumed.items():
if any(
isinstance(stmt, nodes.AssignName)
and isinstance(stmt.assign_type(), nodes.AugAssign)
for stmt in stmts
):
continue
for stmt in stmts:
if not isinstance(stmt, (nodes.ImportFrom, nodes.Import)):
continue
for imports in stmt.names:
second_name = None
import_module_name = imports[0]
if import_module_name == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
name_matches_dotted_import = False
if (
import_module_name.startswith(name)
and import_module_name.find(".") > -1
):
name_matches_dotted_import = True
if name_matches_dotted_import or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = import_module_name
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
"""
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((nodes.Import, nodes.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None
def _import_name_is_global(stmt, global_names):
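    """Return True if any name bound by the import statement is declared global."""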
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
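    """Flatten the ``names`` of the given statements (e.g. Global/Nonlocal nodes) into one set."""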
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
"""
Checks if name_node has corresponding assign statement in same scope
"""
assign_stmts = name_node.scope().nodes_of_class(nodes.AssignName)
return any(a.name == name_node.name for a in assign_stmts)
def _is_type_checking_import(node: Union[nodes.Import, nodes.ImportFrom]) -> bool:
"""Check if an import node is guarded by a TYPE_CHECKS guard"""
return any(
isinstance(ancestor, nodes.If)
and ancestor.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
for ancestor in node.node_ancestors()
)
def _has_locals_call_after_node(stmt, scope):
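    """Return True if ``scope`` calls the builtin ``locals()`` after ``stmt``'s line."""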
skip_nodes = (
nodes.FunctionDef,
nodes.ClassDef,
nodes.Import,
nodes.ImportFrom,
)
for call in scope.nodes_of_class(nodes.Call, skip_klass=skip_nodes):
inferred = utils.safe_infer(call.func)
if (
utils.is_builtin_object(inferred)
and getattr(inferred, "name", None) == "locals"
):
if stmt.lineno < call.lineno:
return True
return False
MSGS = {
"E0601": (
"Using variable %r before assignment",
"used-before-assignment",
"Emitted when a local variable is accessed before its assignment took place. "
"Assignments in try blocks are assumed not to have occurred when evaluating "
"associated except/finally blocks. Assignments in except blocks are assumed "
"not to have occurred when evaluating statements outside the block, except "
"when the associated try block contains a return statement.",
),
"E0602": (
"Undefined variable %r",
"undefined-variable",
"Used when an undefined variable is accessed.",
),
"E0603": (
"Undefined variable name %r in __all__",
"undefined-all-variable",
"Used when an undefined variable name is referenced in __all__.",
),
"E0604": (
"Invalid object %r in __all__, must contain only strings",
"invalid-all-object",
"Used when an invalid (non-string) object occurs in __all__.",
),
"E0605": (
"Invalid format for __all__, must be tuple or list",
"invalid-all-format",
"Used when __all__ has an invalid format.",
),
"E0611": (
"No name %r in module %r",
"no-name-in-module",
"Used when a name cannot be found in a module.",
),
"W0601": (
"Global variable %r undefined at the module level",
"global-variable-undefined",
'Used when a variable is defined through the "global" statement '
"but the variable is not defined in the module scope.",
),
"W0602": (
"Using global for %r but no assignment is done",
"global-variable-not-assigned",
'Used when a variable is defined through the "global" statement '
"but no assignment to this variable is done.",
),
"W0603": (
"Using the global statement", # W0121
"global-statement",
        'Used when you use the "global" statement to update a global '
        "variable. Pylint just tries to discourage this "
        "usage. That doesn't mean you cannot use it!",
),
"W0604": (
"Using the global statement at the module level", # W0103
"global-at-module-level",
'Used when you use the "global" statement at the module level '
"since it has no effect",
),
"W0611": (
"Unused %s",
"unused-import",
"Used when an imported module or variable is not used.",
),
"W0612": (
"Unused variable %r",
"unused-variable",
"Used when a variable is defined but not used.",
),
"W0613": (
"Unused argument %r",
"unused-argument",
"Used when a function or method argument is not used.",
),
"W0614": (
"Unused import(s) %s from wildcard import of %s",
"unused-wildcard-import",
"Used when an imported module or variable is not used from a "
"`'from X import *'` style import.",
),
"W0621": (
"Redefining name %r from outer scope (line %s)",
"redefined-outer-name",
"Used when a variable's name hides a name defined in the outer scope.",
),
"W0622": (
"Redefining built-in %r",
"redefined-builtin",
"Used when a variable or function override a built-in.",
),
"W0631": (
"Using possibly undefined loop variable %r",
"undefined-loop-variable",
"Used when a loop variable (i.e. defined by a for loop or "
"a list comprehension or a generator expression) is used outside "
"the loop.",
),
"W0632": (
"Possible unbalanced tuple unpacking with "
"sequence%s: "
"left side has %d label(s), right side has %d value(s)",
"unbalanced-tuple-unpacking",
"Used when there is an unbalanced tuple unpacking in assignment",
{"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
),
"E0633": (
"Attempting to unpack a non-sequence%s",
"unpacking-non-sequence",
"Used when something which is not "
"a sequence is used in an unpack assignment",
{"old_names": [("W0633", "old-unpacking-non-sequence")]},
),
"W0640": (
"Cell variable %s defined in loop",
"cell-var-from-loop",
"A variable used in a closure is defined in a loop. "
"This will result in all closures using the same value for "
"the closed-over variable.",
),
"W0641": (
"Possibly unused variable %r",
"possibly-unused-variable",
"Used when a variable is defined but might not be used. "
"The possibility comes from the fact that locals() might be used, "
"which could consume or not the said variable",
),
"W0642": (
"Invalid assignment to %s in method",
"self-cls-assignment",
"Invalid assignment to self or cls in instance or class method "
"respectively.",
),
}
ScopeConsumer = collections.namedtuple(
"ScopeConsumer", "to_consume consumed consumed_uncertain scope_type"
)
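# Fields: locals still to be consumed, locals already consumed, consumptions that are
# uncertain (e.g. assignments inside except blocks), and the kind of enclosing scope.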
class NamesConsumer:
"""
A simple class to handle consumed, to consume and scope type info of node locals
"""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(
copy.copy(node.locals), {}, collections.defaultdict(list), scope_type
)
self.node = node
def __repr__(self):
to_consumes = [f"{k}->{v}" for k, v in self._atomic.to_consume.items()]
consumed = [f"{k}->{v}" for k, v in self._atomic.consumed.items()]
consumed_uncertain = [
f"{k}->{v}" for k, v in self._atomic.consumed_uncertain.items()
]
to_consumes = ", ".join(to_consumes)
consumed = ", ".join(consumed)
consumed_uncertain = ", ".join(consumed_uncertain)
return f"""
to_consume : {to_consumes}
consumed : {consumed}
consumed_uncertain: {consumed_uncertain}
scope_type : {self._atomic.scope_type}
"""
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def consumed_uncertain(self) -> DefaultDict[str, List[nodes.NodeNG]]:
"""
Retrieves nodes filtered out by get_next_to_consume() that may not
have executed, such as statements in except blocks, or statements
in try blocks (when evaluating their corresponding except and finally
blocks). Checkers that want to treat the statements as executed
(e.g. for unused-variable) may need to add them back.
"""
return self._atomic.consumed_uncertain
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, consumed_nodes):
"""
Mark the given nodes as consumed for the name.
If all of the nodes for the name were consumed, delete the name from
the to_consume dictionary
"""
unconsumed = [n for n in self.to_consume[name] if n not in set(consumed_nodes)]
self.consumed[name] = consumed_nodes
if unconsumed:
self.to_consume[name] = unconsumed
else:
del self.to_consume[name]
def get_next_to_consume(self, node):
"""
Return a list of the nodes that define `node` from this scope. If it is
uncertain whether a node will be consumed, such as for statements in
except blocks, add it to self.consumed_uncertain instead of returning it.
Return None to indicate a special case that needs to be handled by the caller.
"""
name = node.name
parent_node = node.parent
found_nodes = self.to_consume.get(name)
node_statement = node.statement(future=True)
if (
found_nodes
and isinstance(parent_node, nodes.Assign)
and parent_node == found_nodes[0].parent
):
lhs = found_nodes[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_nodes = None
if (
found_nodes
and isinstance(parent_node, nodes.For)
and parent_node.iter == node
and parent_node.target in found_nodes
):
found_nodes = None
# Filter out assignments in ExceptHandlers that node is not contained in
if found_nodes:
found_nodes = [
n
for n in found_nodes
if not isinstance(n.statement(future=True), nodes.ExceptHandler)
or n.statement(future=True).parent_of(node)
]
# Filter out assignments in an Except clause that the node is not
# contained in, assuming they may fail
if found_nodes:
filtered_nodes = [
n
for n in found_nodes
if not (
isinstance(n.statement(future=True).parent, nodes.ExceptHandler)
and isinstance(
n.statement(future=True).parent.parent, nodes.TryExcept
)
# If the try block returns we assume that assignments in the except
# handlers could have happened.
and (
not any(
isinstance(try_statement, nodes.Return)
for try_statement in n.statement(
future=True
).parent.parent.body
)
# But not if this node is in the final block, which will
# execute before the return.
or (
isinstance(node_statement.parent, nodes.TryFinally)
and node_statement in node_statement.parent.finalbody
and n.statement(future=True).parent.parent.parent.parent_of(
node_statement
)
)
)
)
or n.statement(future=True).parent.parent_of(node)
]
filtered_nodes_set = set(filtered_nodes)
difference = [n for n in found_nodes if n not in filtered_nodes_set]
self.consumed_uncertain[node.name] += difference
found_nodes = filtered_nodes
# If this node is in a Finally block of a Try/Finally,
# filter out assignments in the try portion, assuming they may fail
if (
found_nodes
and isinstance(node_statement.parent, nodes.TryFinally)
and node_statement in node_statement.parent.finalbody
):
filtered_nodes = [
n
for n in found_nodes
if not (
n.statement(future=True).parent is node_statement.parent
and n.statement(future=True) in n.statement(future=True).parent.body
)
]
filtered_nodes_set = set(filtered_nodes)
difference = [n for n in found_nodes if n not in filtered_nodes_set]
self.consumed_uncertain[node.name] += difference
found_nodes = filtered_nodes
# If this node is in an ExceptHandler,
# filter out assignments in the try portion, assuming they may fail
if found_nodes and isinstance(node_statement.parent, nodes.ExceptHandler):
filtered_nodes = [
n
for n in found_nodes
if not (
isinstance(n.statement(future=True).parent, nodes.TryExcept)
and n.statement(future=True) in n.statement(future=True).parent.body
and node_statement.parent
in n.statement(future=True).parent.handlers
)
]
filtered_nodes_set = set(filtered_nodes)
difference = [n for n in found_nodes if n not in filtered_nodes_set]
self.consumed_uncertain[node.name] += difference
found_nodes = filtered_nodes
return found_nodes
# pylint: disable=too-many-public-methods
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
* self/cls assignment
"""
__implements__ = IAstroidChecker
name = "variables"
msgs = MSGS
priority = -1
options = (
(
"init-import",
{
"default": 0,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether we should check for unused import in "
"__init__ files.",
},
),
(
"dummy-variables-rgx",
{
"default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
"type": "regexp",
"metavar": "<regexp>",
"help": "A regular expression matching the name of dummy "
"variables (i.e. expected to not be used).",
},
),
(
"additional-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of additional names supposed to be defined in "
"builtins. Remember that you should avoid defining new builtins "
"when possible.",
},
),
(
"callbacks",
{
"default": ("cb_", "_cb"),
"type": "csv",
"metavar": "<callbacks>",
"help": "List of strings which can identify a callback "
"function by name. A callback name must start or "
"end with one of those strings.",
},
),
(
"redefining-builtins-modules",
{
"default": (
"six.moves",
"past.builtins",
"future.builtins",
"builtins",
"io",
),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of qualified module names which can have objects "
"that can redefine builtins.",
},
),
(
"ignored-argument-names",
{
"default": IGNORED_ARGUMENT_NAMES,
"type": "regexp",
"metavar": "<regexp>",
"help": "Argument names that match this expression will be "
"ignored. Default to name with leading underscore.",
},
),
(
"allow-global-unused-variables",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether unused global variables should be treated as a violation.",
},
),
(
"allowed-redefined-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of names allowed to shadow builtins",
},
),
)
def __init__(self, linter=None):
super().__init__(linter)
self._to_consume: List[NamesConsumer] = []
self._checking_mod_attr = None
self._loop_variables = []
self._type_annotation_names = []
self._postponed_evaluation_enabled = False
def open(self) -> None:
"""Called when loading the checker"""
self._is_undefined_variable_enabled = self.linter.is_message_enabled(
"undefined-variable"
)
self._is_used_before_assignment_enabled = self.linter.is_message_enabled(
"used-before-assignment"
)
self._is_undefined_loop_variable_enabled = self.linter.is_message_enabled(
"undefined-loop-variable"
)
@utils.check_messages("redefined-outer-name")
def visit_for(self, node: nodes.For) -> None:
assigned_to = [a.name for a in node.target.nodes_of_class(nodes.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if variable in outer_variables and not in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-outer-name",
args=(variable, outer_for.fromlineno),
node=node,
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages("redefined-outer-name")
def leave_for(self, node: nodes.For) -> None:
self._loop_variables.pop()
self._store_type_annotation_names(node)
    def visit_module(self, node: nodes.Module) -> None:
        """visit module: update consumption analysis variable
        and check that globals don't override builtins
"""
self._to_consume = [NamesConsumer(node, "module")]
self._postponed_evaluation_enabled = is_postponed_evaluation_enabled(node)
for name, stmts in node.locals.items():
if utils.is_builtin(name):
if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
continue
self.add_message("redefined-builtin", args=name, node=stmts[0])
@utils.check_messages(
"unused-import",
"unused-wildcard-import",
"redefined-builtin",
"undefined-all-variable",
"invalid-all-object",
"invalid-all-format",
"unused-variable",
)
def leave_module(self, node: nodes.Module) -> None:
"""leave module: check globals"""
assert len(self._to_consume) == 1
self._check_metaclasses(node)
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if "__all__" in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""visit class: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "class"))
def leave_classdef(self, _: nodes.ClassDef) -> None:
"""leave class: update consumption analysis variable"""
# do not check for not used locals here (no sense)
self._to_consume.pop()
def visit_lambda(self, node: nodes.Lambda) -> None:
"""visit lambda: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "lambda"))
def leave_lambda(self, _: nodes.Lambda) -> None:
"""leave lambda: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node: nodes.GeneratorExp) -> None:
"""visit genexpr: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_generatorexp(self, _: nodes.GeneratorExp) -> None:
"""leave genexpr: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node: nodes.DictComp) -> None:
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_dictcomp(self, _: nodes.DictComp) -> None:
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node: nodes.SetComp) -> None:
"""visit setcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_setcomp(self, _: nodes.SetComp) -> None:
"""leave setcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""visit function: update consumption analysis variable and check locals"""
self._to_consume.append(NamesConsumer(node, "function"))
if not (
self.linter.is_message_enabled("redefined-outer-name")
or self.linter.is_message_enabled("redefined-builtin")
):
return
globs = node.root().globals
for name, stmt in node.items():
if name in globs and not isinstance(stmt, nodes.Global):
definition = globs[name][0]
if (
isinstance(definition, nodes.ImportFrom)
and definition.modname == FUTURE
):
# It is a __future__ directive, not a symbol.
continue
                # Do not take into account redefined names for the purpose
                # of type checking:
if any(
isinstance(definition.parent, nodes.If)
and definition.parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
for definition in globs[name]
):
continue
line = definition.fromlineno
if not self._is_name_ignored(stmt, name):
self.add_message(
"redefined-outer-name", args=(name, line), node=stmt
)
elif (
utils.is_builtin(name)
and not self._allowed_redefined_builtin(name)
and not self._should_ignore_redefined_builtin(stmt)
):
# do not print Redefining builtin for additional builtins
self.add_message("redefined-builtin", args=name, node=stmt)
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
"""leave function: check function's locals are consumed"""
self._check_metaclasses(node)
if node.type_comment_returns:
self._store_type_annotation_node(node.type_comment_returns)
if node.type_comment_args:
for argument_annotation in node.type_comment_args:
self._store_type_annotation_node(argument_annotation)
not_consumed = self._to_consume.pop().to_consume
if not (
self.linter.is_message_enabled("unused-variable")
or self.linter.is_message_enabled("possibly-unused-variable")
or self.linter.is_message_enabled("unused-argument")
):
return
# Don't check arguments of function which are only raising an exception.
if utils.is_error(node):
return
# Don't check arguments of abstract methods or within an interface.
is_method = node.is_method()
if is_method and node.is_abstract():
return
global_names = _flattened_scope_names(node.nodes_of_class(nodes.Global))
nonlocal_names = _flattened_scope_names(node.nodes_of_class(nodes.Nonlocal))
for name, stmts in not_consumed.items():
self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages(
"global-variable-undefined",
"global-variable-not-assigned",
"global-statement",
"global-at-module-level",
"redefined-builtin",
)
    def visit_global(self, node: nodes.Global) -> None:
        """check that imported names exist in the global scope"""
frame = node.frame()
if isinstance(frame, nodes.Module):
self.add_message("global-at-module-level", node=node)
return
module = frame.root()
default_message = True
locals_ = node.scope().locals
for name in node.names:
try:
assign_nodes = module.getattr(name)
except astroid.NotFoundError:
# unassigned global, skip
assign_nodes = []
not_defined_locally_by_import = not any(
isinstance(local, nodes.Import) for local in locals_.get(name, ())
)
if (
not utils.is_reassigned_after_current(node, name)
and not_defined_locally_by_import
):
self.add_message("global-variable-not-assigned", args=name, node=node)
default_message = False
continue
for anode in assign_nodes:
if (
isinstance(anode, nodes.AssignName)
and anode.name in module.special_attributes
):
self.add_message("redefined-builtin", args=name, node=node)
break
if anode.frame() is module:
# module level assignment
break
if isinstance(anode, nodes.FunctionDef) and anode.parent is module:
# module level function assignment
break
else:
if not_defined_locally_by_import:
# global undefined at the module scope
self.add_message("global-variable-undefined", args=name, node=node)
default_message = False
if default_message:
self.add_message("global-statement", node=node)
def visit_assignname(self, node: nodes.AssignName) -> None:
if isinstance(node.assign_type(), nodes.AugAssign):
self.visit_name(node)
def visit_delname(self, node: nodes.DelName) -> None:
self.visit_name(node)
def visit_name(self, node: nodes.Name) -> None:
"""Don't add the 'utils.check_messages' decorator here!
It's important that all 'Name' nodes are visited, otherwise the
'NamesConsumers' won't be correct.
"""
stmt = node.statement()
if stmt.fromlineno is None:
# name node from an astroid built from live code, skip
assert not stmt.root().file.endswith(".py")
return
self._undefined_and_used_before_checker(node, stmt)
if self._is_undefined_loop_variable_enabled:
self._loopvar_name(node)
def _undefined_and_used_before_checker(
self, node: nodes.Name, stmt: nodes.NodeNG
) -> None:
frame = stmt.scope()
start_index = len(self._to_consume) - 1
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
# Certain nodes shouldn't be checked as they get checked another time
if self._should_node_be_skipped(node, current_consumer, i == start_index):
continue
action, found_nodes = self._check_consumer(
node, stmt, frame, current_consumer, i, base_scope_type
)
if action is VariableVisitConsumerAction.CONTINUE:
continue
if action is VariableVisitConsumerAction.CONSUME:
# pylint: disable-next=fixme
# TODO: remove assert after _check_consumer return value better typed
assert found_nodes is not None, "Cannot consume an empty list of nodes."
# Any nodes added to consumed_uncertain by get_next_to_consume()
# should be added back so that they are marked as used.
# They will have already had a chance to emit used-before-assignment.
# We check here instead of before every single return in _check_consumer()
found_nodes += current_consumer.consumed_uncertain[node.name]
current_consumer.mark_as_consumed(node.name, found_nodes)
if action in {
VariableVisitConsumerAction.RETURN,
VariableVisitConsumerAction.CONSUME,
}:
return
# we have not found the name, if it isn't a builtin, that's an
# undefined name !
if (
self._is_undefined_variable_enabled
and not (
node.name in nodes.Module.scope_attrs
or utils.is_builtin(node.name)
or node.name in self.config.additional_builtins
or (
node.name == "__class__"
and isinstance(frame, nodes.FunctionDef)
and frame.is_method()
)
)
and not utils.node_ignores_exception(node, NameError)
):
self.add_message("undefined-variable", args=node.name, node=node)
def _should_node_be_skipped(
self, node: nodes.Name, consumer: NamesConsumer, is_start_index: bool
) -> bool:
"""Tests a consumer and node for various conditions in which the node
shouldn't be checked for the undefined-variable and used-before-assignment checks.
"""
if consumer.scope_type == "class":
# The list of base classes in the class definition is not part
# of the class body.
# If the current scope is a class scope but it's not the inner
            # scope, ignore it. This prevents accessing this scope instead of
            # the global one in function members when there are some common
# names.
if utils.is_ancestor_name(consumer.node, node) or (
not is_start_index and self._ignore_class_scope(node)
):
return True
# Ignore inner class scope for keywords in class definition
if isinstance(node.parent, nodes.Keyword) and isinstance(
node.parent.parent, nodes.ClassDef
):
return True
elif consumer.scope_type == "function" and self._defined_in_function_definition(
node, consumer.node
):
# If the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
return True
elif consumer.scope_type == "lambda" and utils.is_default_argument(
node, consumer.node
):
return True
return False
# pylint: disable=too-many-return-statements
def _check_consumer(
self,
node: nodes.Name,
stmt: nodes.NodeNG,
frame: nodes.LocalsDictNodeNG,
current_consumer: NamesConsumer,
consumer_level: int,
base_scope_type: Any,
) -> Tuple[VariableVisitConsumerAction, Optional[Any]]:
"""Checks a consumer for conditions that should trigger messages"""
        # If the name has already been consumed, only check that it's not a loop
# variable used outside the loop.
# Avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if node.name in current_consumer.consumed:
if utils.is_func_decorator(current_consumer.node) or not (
current_consumer.scope_type == "comprehension"
and self._has_homonym_in_upper_function_scope(node, consumer_level)
):
self._check_late_binding_closure(node)
self._loopvar_name(node)
return (VariableVisitConsumerAction.RETURN, None)
found_nodes = current_consumer.get_next_to_consume(node)
if found_nodes is None:
return (VariableVisitConsumerAction.CONTINUE, None)
if not found_nodes:
self.add_message("used-before-assignment", args=node.name, node=node)
if current_consumer.consumed_uncertain[node.name]:
# If there are nodes added to consumed_uncertain by
# get_next_to_consume() because they might not have executed,
# return a CONSUME action so that _undefined_and_used_before_checker()
# will mark them as used
return (VariableVisitConsumerAction.CONSUME, found_nodes)
return (VariableVisitConsumerAction.RETURN, found_nodes)
self._check_late_binding_closure(node)
if not (
self._is_undefined_variable_enabled
or self._is_used_before_assignment_enabled
):
return (VariableVisitConsumerAction.CONSUME, found_nodes)
defnode = utils.assign_parent(found_nodes[0])
defstmt = defnode.statement()
defframe = defstmt.frame()
# The class reuses itself in the class scope.
is_recursive_klass = (
frame is defframe
and defframe.parent_of(node)
and isinstance(defframe, nodes.ClassDef)
and node.name == defframe.name
)
if (
is_recursive_klass
and utils.get_node_first_ancestor_of_type(node, nodes.Lambda)
and (
not utils.is_default_argument(node)
or node.scope().parent.scope() is not defframe
)
):
# Self-referential class references are fine in lambda's --
# As long as they are not part of the default argument directly
# under the scope of the parent self-referring class.
# Example of valid default argument:
# class MyName3:
# myattr = 1
# mylambda3 = lambda: lambda a=MyName3: a
# Example of invalid default argument:
# class MyName4:
# myattr = 1
# mylambda4 = lambda a=MyName4: lambda: a
# If the above conditional is True,
# there is no possibility of undefined-variable
# Also do not consume class name
# (since consuming blocks subsequent checks)
# -- quit
return (VariableVisitConsumerAction.RETURN, None)
(
maybe_before_assign,
annotation_return,
use_outer_definition,
) = self._is_variable_violation(
node,
defnode,
stmt,
defstmt,
frame,
defframe,
base_scope_type,
is_recursive_klass,
)
if use_outer_definition:
return (VariableVisitConsumerAction.CONTINUE, None)
if (
maybe_before_assign
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = defstmt is stmt and isinstance(
node, (nodes.DelName, nodes.AssignName)
)
if (
is_recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, nodes.Delete)
):
if not utils.node_ignores_exception(node, NameError):
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(
stmt,
(
nodes.AnnAssign,
nodes.FunctionDef,
nodes.Arguments,
),
)
and node.name in node.root().locals
):
self.add_message(
"undefined-variable", args=node.name, node=node
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif base_scope_type != "lambda":
                # E0601 may *not* occur in lambda scope.
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(stmt, (nodes.AnnAssign, nodes.FunctionDef))
):
self.add_message(
"used-before-assignment", args=node.name, node=node
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif base_scope_type == "lambda":
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
if isinstance(frame, nodes.ClassDef) and node.name in frame.locals:
if isinstance(node.parent, nodes.Arguments):
if stmt.fromlineno <= defstmt.fromlineno:
# Doing the following is fine:
# class A:
# x = 42
# y = lambda attr=x: attr
self.add_message(
"used-before-assignment",
args=node.name,
node=node,
)
else:
self.add_message(
"undefined-variable", args=node.name, node=node
)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif current_consumer.scope_type == "lambda":
self.add_message("undefined-variable", args=node.name, node=node)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif self._is_only_type_assignment(node, defstmt):
self.add_message("undefined-variable", args=node.name, node=node)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
elif isinstance(defstmt, nodes.ClassDef):
is_first_level_ref = self._is_first_level_self_reference(node, defstmt)
if is_first_level_ref == 2:
self.add_message("used-before-assignment", node=node, args=node.name)
if is_first_level_ref:
return (VariableVisitConsumerAction.RETURN, None)
elif isinstance(defnode, nodes.NamedExpr):
if isinstance(defnode.parent, nodes.IfExp):
if self._is_never_evaluated(defnode, defnode.parent):
self.add_message("undefined-variable", args=node.name, node=node)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
return (VariableVisitConsumerAction.CONSUME, found_nodes)
@utils.check_messages("no-name-in-module")
def visit_import(self, node: nodes.Import) -> None:
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
if utils.is_node_in_guarded_import_block(node) is True:
# Don't verify import if part of guarded import block
# I.e. `sys.version_info` or `typing.TYPE_CHECKING`
return
for name, _ in node.names:
parts = name.split(".")
try:
module = next(_infer_name_module(node, parts[0]))
except astroid.ResolveError:
continue
if not isinstance(module, nodes.Module):
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages("no-name-in-module")
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
if utils.is_node_in_guarded_import_block(node) is True:
# Don't verify import if part of guarded import block
# I.e. `sys.version_info` or `typing.TYPE_CHECKING`
return
name_parts = node.modname.split(".")
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == "*":
continue
self._check_module_attrs(node, module, name.split("."))
@utils.check_messages(
"unbalanced-tuple-unpacking", "unpacking-non-sequence", "self-cls-assignment"
)
def visit_assign(self, node: nodes.Assign) -> None:
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences as well as in case self/cls
get assigned.
"""
self._check_self_cls_assign(node)
if not isinstance(node.targets[0], (nodes.Tuple, nodes.List)):
return
targets = node.targets[0].itered()
try:
inferred = utils.safe_infer(node.value)
if inferred is not None:
self._check_unpacking(inferred, node, targets)
except astroid.InferenceError:
return
# listcomp have now also their scope
def visit_listcomp(self, node: nodes.ListComp) -> None:
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_listcomp(self, _: nodes.ListComp) -> None:
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def leave_assign(self, node: nodes.Assign) -> None:
self._store_type_annotation_names(node)
def leave_with(self, node: nodes.With) -> None:
self._store_type_annotation_names(node)
def visit_arguments(self, node: nodes.Arguments) -> None:
for annotation in node.type_comment_args:
self._store_type_annotation_node(annotation)
# Relying on other checker's options, which might not have been initialized yet.
@astroid.decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, "analyse-fallback-blocks", default=False)
@astroid.decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, "ignored-modules", default=[])
@astroid.decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, "allow-global-unused-variables", default=True)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default_or_decorator = False
if isinstance(frame, nodes.FunctionDef) and node.statement() is frame:
in_annotation_or_default_or_decorator = (
(
node in frame.args.annotations
or node in frame.args.posonlyargs_annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation
)
or frame.args.parent_of(node)
or (frame.decorators and frame.decorators.parent_of(node))
or (
frame.returns
and (node is frame.returns or frame.returns.parent_of(node))
)
)
return in_annotation_or_default_or_decorator
@staticmethod
def _in_lambda_or_comprehension_body(
node: nodes.NodeNG, frame: nodes.NodeNG
) -> bool:
"""return True if node within a lambda/comprehension body (or similar) and thus should not have access to class attributes in frame"""
child = node
parent = node.parent
while parent is not None:
if parent is frame:
return False
if isinstance(parent, nodes.Lambda) and child is not parent.args:
# Body of lambda should not have access to class attributes.
return True
if isinstance(parent, nodes.Comprehension) and child is not parent.iter:
# Only iter of list/set/dict/generator comprehension should have access.
return True
if isinstance(parent, nodes.ComprehensionScope) and not (
parent.generators and child is parent.generators[0]
):
# Body of list/set/dict/generator comprehension should not have access to class attributes.
# Furthermore, only the first generator (if multiple) in comprehension should have access.
return True
child = parent
parent = parent.parent
return False
@staticmethod
def _is_variable_violation(
node: nodes.Name,
defnode,
stmt,
defstmt,
frame, # scope of statement of node
defframe,
base_scope_type,
is_recursive_klass,
) -> Tuple[bool, bool, bool]:
# pylint: disable=too-many-nested-blocks
maybe_before_assign = True
annotation_return = False
use_outer_definition = False
if frame is not defframe:
maybe_before_assign = _detect_global_scope(node, frame, defframe)
elif defframe.parent is None:
# we are at the module level, check the name is not
# defined in builtins
if (
node.name in defframe.scope_attrs
or astroid.builtin_lookup(node.name)[1]
):
maybe_before_assign = False
else:
# we are in a local scope, check the name is not
# defined in global or builtin scope
# skip this lookup if name is assigned later in function scope/lambda
# Note: the node.frame() is not the same as the `frame` argument which is
# equivalent to frame.statement().scope()
forbid_lookup = (
isinstance(frame, nodes.FunctionDef)
or isinstance(node.frame(), nodes.Lambda)
) and _assigned_locally(node)
if not forbid_lookup and defframe.root().lookup(node.name)[1]:
maybe_before_assign = False
use_outer_definition = stmt == defstmt and not isinstance(
defnode, nodes.Comprehension
)
# check if we have a nonlocal
elif node.name in defframe.locals:
maybe_before_assign = not any(
isinstance(child, nodes.Nonlocal) and node.name in child.names
for child in defframe.get_children()
)
if (
base_scope_type == "lambda"
and isinstance(frame, nodes.ClassDef)
and node.name in frame.locals
):
# This rule verifies that if the definition node of the
# checked name is an Arguments node and if the name
            # is used as a default value in the arguments defaults
# and the actual definition of the variable label
# is happening before the Arguments definition.
#
# bar = None
# foo = lambda bar=bar: bar
#
# In this case, maybe_before_assign should be False, otherwise
# it should be True.
maybe_before_assign = not (
isinstance(defnode, nodes.Arguments)
and node in defnode.defaults
and frame.locals[node.name][0].fromlineno < defstmt.fromlineno
)
elif isinstance(defframe, nodes.ClassDef) and isinstance(
frame, nodes.FunctionDef
):
# Special rule for function return annotations,
# which uses the same name as the class where
# the function lives.
if node is frame.returns and defframe.parent_of(frame.returns):
maybe_before_assign = annotation_return = True
if (
maybe_before_assign
and defframe.name in defframe.locals
and defframe.locals[node.name][0].lineno < frame.lineno
):
# Detect class assignments with the same
# name as the class. In this case, no warning
# should be raised.
maybe_before_assign = False
if isinstance(node.parent, nodes.Arguments):
maybe_before_assign = stmt.fromlineno <= defstmt.fromlineno
elif is_recursive_klass:
maybe_before_assign = True
else:
maybe_before_assign = (
maybe_before_assign and stmt.fromlineno <= defstmt.fromlineno
)
if maybe_before_assign and stmt.fromlineno == defstmt.fromlineno:
if (
isinstance(defframe, nodes.FunctionDef)
and frame is defframe
and defframe.parent_of(node)
and stmt is not defstmt
):
# Single statement function, with the statement on the
# same line as the function definition
maybe_before_assign = False
elif (
isinstance( # pylint: disable=too-many-boolean-expressions
defstmt,
(
nodes.Assign,
nodes.AnnAssign,
nodes.AugAssign,
nodes.Expr,
nodes.Return,
),
)
and (
isinstance(defstmt.value, nodes.IfExp)
or isinstance(defstmt.value, nodes.Lambda)
and isinstance(defstmt.value.body, nodes.IfExp)
)
and frame is defframe
and defframe.parent_of(node)
and stmt is defstmt
):
# Single statement if, with assignment expression on same
# line as assignment
# x = b if (b := True) else False
maybe_before_assign = False
elif (
isinstance( # pylint: disable=too-many-boolean-expressions
defnode, nodes.NamedExpr
)
and frame is defframe
and defframe.parent_of(stmt)
and stmt is defstmt
and (
(
defnode.lineno == node.lineno
and defnode.col_offset < node.col_offset
)
or (defnode.lineno < node.lineno)
or (
# Issue in the `ast` module until py39
# Nodes in a multiline string have the same lineno
# Could be false-positive without check
not PY39_PLUS
and defnode.lineno == node.lineno
and isinstance(
defstmt,
(
nodes.Assign,
nodes.AnnAssign,
nodes.AugAssign,
nodes.Return,
),
)
and isinstance(defstmt.value, nodes.JoinedStr)
)
)
):
# Expressions, with assignment expressions
# Use only after assignment
# b = (c := 2) and c
maybe_before_assign = False
# Look for type checking definitions inside a type checking guard.
if isinstance(defstmt, (nodes.Import, nodes.ImportFrom)):
defstmt_parent = defstmt.parent
if (
isinstance(defstmt_parent, nodes.If)
and defstmt_parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
):
# Exempt those definitions that are used inside the type checking
# guard or that are defined in both type checking guard branches.
used_in_branch = defstmt_parent.parent_of(node)
defined_in_or_else = False
for definition in defstmt_parent.orelse:
if isinstance(definition, nodes.Assign):
defined_in_or_else = any(
target.name == node.name
for target in definition.targets
if isinstance(target, nodes.AssignName)
)
if defined_in_or_else:
break
if not used_in_branch and not defined_in_or_else:
maybe_before_assign = True
return maybe_before_assign, annotation_return, use_outer_definition
# pylint: disable-next=fixme
# TODO: The typing of `NodeNG.statement()` in astroid is non-specific
# After this has been updated the typing of `defstmt` should reflect this
# See: https://github.com/PyCQA/astroid/pull/1217
@staticmethod
def _is_only_type_assignment(node: nodes.Name, defstmt: nodes.NodeNG) -> bool:
"""Check if variable only gets assigned a type and never a value"""
if not isinstance(defstmt, nodes.AnnAssign) or defstmt.value:
return False
defstmt_frame = defstmt.frame()
node_frame = node.frame()
parent = node
while parent is not defstmt_frame.parent:
parent_scope = parent.scope()
local_refs = parent_scope.locals.get(node.name, [])
for ref_node in local_refs:
# If local ref is in the same frame as our node, but on a later lineno
# we don't actually care about this local ref.
# Local refs are ordered, so we break.
# print(var)
# var = 1 # <- irrelevant
if defstmt_frame == node_frame and not ref_node.lineno < node.lineno:
break
                # If the parent of the local reference is anything but an AnnAssign
# Or if the AnnAssign adds a value the variable will now have a value
# var = 1 # OR
# var: int = 1
if (
not isinstance(ref_node.parent, nodes.AnnAssign)
or ref_node.parent.value
):
return False
parent = parent_scope.parent
return True
@staticmethod
def _is_first_level_self_reference(
node: nodes.Name, defstmt: nodes.ClassDef
) -> Literal[0, 1, 2]:
"""Check if a first level method's annotation or default values
refers to its own class.
Return values correspond to:
0 = Continue
1 = Break
2 = Break + emit message
"""
if (
node.frame().parent == defstmt
and node.statement(future=True) not in node.frame().body
):
# Check if used as type annotation
# Break but don't emit message if postponed evaluation is enabled
if utils.is_node_in_type_annotation_context(node):
if not utils.is_postponed_evaluation_enabled(node):
return 2
return 1
# Check if used as default value by calling the class
if isinstance(node.parent, nodes.Call) and isinstance(
node.parent.parent, nodes.Arguments
):
return 2
return 0
@staticmethod
def _is_never_evaluated(
defnode: nodes.NamedExpr, defnode_parent: nodes.IfExp
) -> bool:
"""Check if a NamedExpr is inside a side of if ... else that never
gets evaluated
"""
inferred_test = utils.safe_infer(defnode_parent.test)
if isinstance(inferred_test, nodes.Const):
if inferred_test.value is True and defnode == defnode_parent.orelse:
return True
if inferred_test.value is False and defnode == defnode_parent.body:
return True
return False
def _ignore_class_scope(self, node):
"""
Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
# class C:
# class Tp:
# pass
# class D(Tp):
# ...
name = node.name
frame = node.statement().scope()
in_annotation_or_default_or_decorator = self._defined_in_function_definition(
node, frame
)
in_ancestor_list = utils.is_ancestor_name(frame, node)
if in_annotation_or_default_or_decorator or in_ancestor_list:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not (
(isinstance(frame, nodes.ClassDef) or in_annotation_or_default_or_decorator)
and not self._in_lambda_or_comprehension_body(node, frame)
and name in frame_locals
)
def _loopvar_name(self, node: astroid.Name) -> None:
# filter variables according to node's scope
astmts = [s for s in node.lookup(node.name)[1] if hasattr(s, "assign_type")]
# If this variable usage exists inside a function definition
# that exists in the same loop,
# the usage is safe because the function will not be defined either if
# the variable is not defined.
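        # Illustrative example of the safe pattern described above:
        #     for item in iterable:
        #         def callback():
        #             return item  # only reachable if the loop body ran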
scope = node.scope()
if isinstance(scope, nodes.FunctionDef) and any(
asmt.statement().parent_of(scope) for asmt in astmts
):
return
        # filter variables according to their respective scope; test is_statement
        # and parent to avoid #74747. This is not a total fix, which would
# introduce a mechanism similar to special attribute lookup in
# modules. Also, in order to get correct inference in this case, the
# scope lookup rules would need to be changed to return the initial
# assignment (which does not exist in code per se) as well as any later
# modifications.
if (
not astmts
or (astmts[0].is_statement or astmts[0].parent)
and astmts[0].statement().parent_of(node)
):
_astmts = []
else:
_astmts = astmts[:1]
for i, stmt in enumerate(astmts[1:]):
if astmts[i].statement().parent_of(stmt) and not in_for_else_branch(
astmts[i].statement(), stmt
):
continue
_astmts.append(stmt)
astmts = _astmts
if len(astmts) != 1:
return
assign = astmts[0].assign_type()
if not (
isinstance(assign, (nodes.For, nodes.Comprehension, nodes.GeneratorExp))
and assign.statement() is not node.statement()
):
return
# For functions we can do more by inferring the length of the itered object
if not isinstance(assign, nodes.For):
self.add_message("undefined-loop-variable", args=node.name, node=node)
return
try:
inferred = next(assign.iter.infer())
except astroid.InferenceError:
self.add_message("undefined-loop-variable", args=node.name, node=node)
else:
if (
isinstance(inferred, astroid.Instance)
and inferred.qname() == BUILTIN_RANGE
):
# Consider range() objects safe, even if they might not yield any results.
return
# Consider sequences.
sequences = (
nodes.List,
nodes.Tuple,
nodes.Dict,
nodes.Set,
astroid.objects.FrozenSet,
)
if not isinstance(inferred, sequences):
self.add_message("undefined-loop-variable", args=node.name, node=node)
return
elements = getattr(inferred, "elts", getattr(inferred, "items", []))
if not elements:
self.add_message("undefined-loop-variable", args=node.name, node=node)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
# Ignore some special names specified by user configuration.
if self._is_name_ignored(stmt, name):
return
# Ignore names that were added dynamically to the Function scope
if (
isinstance(node, nodes.FunctionDef)
and name == "__class__"
and len(node.locals["__class__"]) == 1
and isinstance(node.locals["__class__"][0], nodes.ClassDef)
):
return
# Ignore names imported by the global statement.
if isinstance(stmt, (nodes.Global, nodes.Import, nodes.ImportFrom)):
# Detect imports, assigned to global statements.
if global_names and _import_name_is_global(stmt, global_names):
return
argnames = list(
itertools.chain(node.argnames(), [arg.name for arg in node.args.kwonlyargs])
)
# Care about functions with unknown argument (builtins)
if name in argnames:
self._check_unused_arguments(name, node, stmt, argnames)
else:
if stmt.parent and isinstance(stmt.parent, (nodes.Assign, nodes.AnnAssign)):
if name in nonlocal_names:
return
qname = asname = None
if isinstance(stmt, (nodes.Import, nodes.ImportFrom)):
# Need the complete name, which we don't have in .locals.
if len(stmt.names) > 1:
import_names = next(
(names for names in stmt.names if name in names), None
)
else:
import_names = stmt.names[0]
if import_names:
qname, asname = import_names
name = asname or qname
if _has_locals_call_after_node(stmt, node.scope()):
message_name = "possibly-unused-variable"
else:
if isinstance(stmt, nodes.Import):
if asname is not None:
msg = f"{qname} imported as {asname}"
else:
msg = f"import {name}"
self.add_message("unused-import", args=msg, node=stmt)
return
if isinstance(stmt, nodes.ImportFrom):
if asname is not None:
msg = f"{qname} imported from {stmt.modname} as {asname}"
else:
msg = f"{name} imported from {stmt.modname}"
self.add_message("unused-import", args=msg, node=stmt)
return
message_name = "unused-variable"
if isinstance(stmt, nodes.FunctionDef) and stmt.decorators:
return
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
# Special case for exception variable
if isinstance(stmt.parent, nodes.ExceptHandler) and any(
n.name == name for n in stmt.parent.nodes_of_class(nodes.Name)
):
return
self.add_message(message_name, args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (
isinstance(stmt, nodes.AssignName)
and isinstance(stmt.parent, nodes.Arguments)
or isinstance(stmt, nodes.Arguments)
):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
def _check_unused_arguments(self, name, node, stmt, argnames):
is_method = node.is_method()
klass = node.parent.frame()
if is_method and isinstance(klass, nodes.ClassDef):
confidence = (
INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
)
else:
confidence = HIGH
if is_method:
# Don't warn for the first argument of a (non static) method
if node.type != "staticmethod" and name == argnames[0]:
return
# Don't warn for argument of an overridden method
overridden = overridden_method(klass, node.name)
if overridden is not None and name in overridden.argnames():
return
if node.name in utils.PYMETHODS and node.name not in (
"__init__",
"__new__",
):
return
# Don't check callback arguments
if any(
node.name.startswith(cb) or node.name.endswith(cb)
for cb in self.config.callbacks
):
return
# Don't check arguments of singledispatch.register function.
if utils.is_registered_in_singledispatch_function(node):
return
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
# Don't check protocol classes
if utils.is_protocol_class(klass):
return
self.add_message("unused-argument", args=name, node=stmt, confidence=confidence)
def _check_late_binding_closure(self, node: nodes.Name) -> None:
"""Check whether node is a cell var that is assigned within a containing loop.
Special cases where we don't care about the error:
1. When the node's function is immediately called, e.g. (lambda: i)()
2. When the node's function is returned from within the loop, e.g. return lambda: i
"""
if not self.linter.is_message_enabled("cell-var-from-loop"):
return
node_scope = node.frame()
# If node appears in a default argument expression,
# look at the next enclosing frame instead
if utils.is_default_argument(node, node_scope):
node_scope = node_scope.parent.frame()
# Check if node is a cell var
if (
not isinstance(node_scope, (nodes.Lambda, nodes.FunctionDef))
or node.name in node_scope.locals
):
return
assign_scope, stmts = node.lookup(node.name)
if not stmts or not assign_scope.parent_of(node_scope):
return
if utils.is_comprehension(assign_scope):
self.add_message("cell-var-from-loop", node=node, args=node.name)
else:
# Look for an enclosing For loop.
# Currently, we only consider the first assignment
assignment_node = stmts[0]
maybe_for = assignment_node
while maybe_for and not isinstance(maybe_for, nodes.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (
maybe_for
and maybe_for.parent_of(node_scope)
and not utils.is_being_called(node_scope)
and not isinstance(node_scope.statement(), nodes.Return)
):
self.add_message("cell-var-from-loop", node=node, args=node.name)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, nodes.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
def _allowed_redefined_builtin(self, name):
return name in self.config.allowed_redefined_builtins
def _has_homonym_in_upper_function_scope(
self, node: nodes.Name, index: int
) -> bool:
"""
Return whether there is a node with the same name in the
to_consume dict of an upper scope and if that scope is a
function
:param node: node to check for
:param index: index of the current consumer inside self._to_consume
:return: True if there is a node with the same name in the
to_consume dict of an upper scope and if that scope
is a function, False otherwise
"""
return any(
_consumer.scope_type == "function" and node.name in _consumer.to_consume
for _consumer in self._to_consume[index - 1 :: -1]
)
def _store_type_annotation_node(self, type_annotation):
"""Given a type annotation, store all the name nodes it refers to"""
if isinstance(type_annotation, nodes.Name):
self._type_annotation_names.append(type_annotation.name)
return
if isinstance(type_annotation, nodes.Attribute):
self._store_type_annotation_node(type_annotation.expr)
return
if not isinstance(type_annotation, nodes.Subscript):
return
if (
isinstance(type_annotation.value, nodes.Attribute)
and isinstance(type_annotation.value.expr, nodes.Name)
and type_annotation.value.expr.name == TYPING_MODULE
):
self._type_annotation_names.append(TYPING_MODULE)
return
self._type_annotation_names.extend(
annotation.name for annotation in type_annotation.nodes_of_class(nodes.Name)
)
def _store_type_annotation_names(self, node):
type_annotation = node.type_annotation
if not type_annotation:
return
self._store_type_annotation_node(node.type_annotation)
def _check_self_cls_assign(self, node: nodes.Assign) -> None:
"""Check that self/cls don't get assigned"""
assign_names: Set[Optional[str]] = set()
for target in node.targets:
if isinstance(target, nodes.AssignName):
assign_names.add(target.name)
elif isinstance(target, nodes.Tuple):
assign_names.update(
elt.name for elt in target.elts if isinstance(elt, nodes.AssignName)
)
scope = node.scope()
nonlocals_with_same_name = any(
child for child in scope.body if isinstance(child, nodes.Nonlocal)
)
if nonlocals_with_same_name:
scope = node.scope().parent.scope()
if not (
isinstance(scope, nodes.FunctionDef)
and scope.is_method()
and "builtins.staticmethod" not in scope.decoratornames()
):
return
argument_names = scope.argnames()
if not argument_names:
return
self_cls_name = argument_names[0]
if self_cls_name in assign_names:
self.add_message("self-cls-assignment", node=node, args=(self_cls_name,))
def _check_unpacking(self, inferred, node, targets):
"""Check for unbalanced tuple unpacking
and unpacking non sequences.
"""
if utils.is_inside_abstract_class(node):
return
if utils.is_comprehension(node):
return
if inferred is astroid.Uninferable:
return
if (
isinstance(inferred.parent, nodes.Arguments)
and isinstance(node.value, nodes.Name)
and node.value.name == inferred.parent.vararg
):
# Variable-length argument, we can't determine the length.
return
# Attempt to check unpacking is properly balanced
values: Optional[List] = None
if isinstance(inferred, (nodes.Tuple, nodes.List)):
values = inferred.itered()
elif isinstance(inferred, astroid.Instance) and any(
ancestor.qname() == "typing.NamedTuple" for ancestor in inferred.ancestors()
):
values = [i for i in inferred.values() if isinstance(i, nodes.AssignName)]
if values:
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, nodes.Starred) for target in targets):
return
self.add_message(
"unbalanced-tuple-unpacking",
node=node,
args=(
_get_unpacking_extra_info(node, inferred),
len(targets),
len(values),
),
)
# attempt to check unpacking may be possible (ie RHS is iterable)
elif not utils.is_iterable(inferred):
self.add_message(
"unpacking-non-sequence",
node=node,
args=(_get_unpacking_extra_info(node, inferred),),
)
def _check_module_attrs(self, node, module, module_names):
"""check that module_names (list of string) are accessible through the
given module
if the latest access name corresponds to a module, return it
"""
while module_names:
name = module_names.pop(0)
if name == "__dict__":
module = None
break
try:
module = next(module.getattr(name)[0].infer())
if module is astroid.Uninferable:
return None
except astroid.NotFoundError:
if module.name in self._ignored_modules:
return None
self.add_message(
"no-name-in-module", args=(name, module.name), node=node
)
return None
except astroid.InferenceError:
return None
if module_names:
modname = module.name if module else "__dict__"
self.add_message(
"no-name-in-module", node=node, args=(".".join(module_names), modname)
)
return None
if isinstance(module, nodes.Module):
return module
return None
def _check_all(self, node: nodes.Module, not_consumed):
assigned = next(node.igetattr("__all__"))
if assigned is astroid.Uninferable:
return
if not assigned.pytype() in {"builtins.list", "builtins.tuple"}:
line, col = assigned.tolineno, assigned.col_offset
self.add_message("invalid-all-format", line=line, col_offset=col, node=node)
return
for elt in getattr(assigned, "elts", ()):
try:
elt_name = next(elt.infer())
except astroid.InferenceError:
continue
if elt_name is astroid.Uninferable:
continue
if not elt_name.parent:
continue
if not isinstance(elt_name, nodes.Const) or not isinstance(
elt_name.value, str
):
self.add_message("invalid-all-object", args=elt.as_string(), node=elt)
continue
elt_name = elt_name.value
# If elt is in not_consumed, remove it from not_consumed
if elt_name in not_consumed:
del not_consumed[elt_name]
continue
if elt_name not in node.locals:
if not node.package:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
else:
basename = os.path.splitext(node.file)[0]
if os.path.basename(basename) == "__init__":
name = node.name + "." + elt_name
try:
astroid.modutils.file_from_modpath(name.split("."))
except ImportError:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
except SyntaxError:
# don't yield a syntax-error warning,
# because it will be later yielded
# when the file will be checked
pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, node_lst in not_consumed.items():
for node in node_lst:
self.add_message("unused-variable", args=(name,), node=node)
def _check_imports(self, not_consumed):
local_names = _fix_dot_imports(not_consumed)
checked = set()
unused_wildcard_imports: DefaultDict[
Tuple[str, nodes.ImportFrom], List[str]
] = collections.defaultdict(list)
for name, stmt in local_names:
for imports in stmt.names:
real_name = imported_name = imports[0]
if imported_name == "*":
real_name = name
as_name = imports[1]
if real_name in checked:
continue
if name not in (real_name, as_name):
continue
checked.add(real_name)
is_type_annotation_import = (
imported_name in self._type_annotation_names
or as_name in self._type_annotation_names
)
if isinstance(stmt, nodes.Import) or (
isinstance(stmt, nodes.ImportFrom) and not stmt.modname
):
if isinstance(stmt, nodes.ImportFrom) and SPECIAL_OBJ.search(
imported_name
):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if as_name == "_":
continue
if as_name is None:
msg = f"import {imported_name}"
else:
msg = f"{imported_name} imported as {as_name}"
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
elif isinstance(stmt, nodes.ImportFrom) and stmt.modname != FUTURE:
if SPECIAL_OBJ.search(imported_name):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if _is_from_future_import(stmt, name):
# Check if the name is in fact loaded from a
# __future__ import in another module.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if imported_name == "*":
unused_wildcard_imports[(stmt.modname, stmt)].append(name)
else:
if as_name is None:
msg = f"{imported_name} imported from {stmt.modname}"
else:
msg = f"{imported_name} imported from {stmt.modname} as {as_name}"
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
# Construct string for unused-wildcard-import message
for module, unused_list in unused_wildcard_imports.items():
if len(unused_list) == 1:
arg_string = unused_list[0]
else:
arg_string = (
f"{', '.join(i for i in unused_list[:-1])} and {unused_list[-1]}"
)
self.add_message(
"unused-wildcard-import", args=(arg_string, module[0]), node=module[1]
)
del self._to_consume
def _check_metaclasses(self, node):
"""Update consumption analysis for metaclasses."""
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, nodes.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
def _check_classdef_metaclasses(self, klass, parent_node):
if not klass._metaclass:
            # Skip if this class doesn't explicitly use a metaclass, but inherits it from ancestors
return []
consumed = [] # [(scope_locals, consumed_key)]
metaclass = klass.metaclass()
name = None
if isinstance(klass._metaclass, nodes.Name):
name = klass._metaclass.name
elif isinstance(klass._metaclass, nodes.Attribute) and klass._metaclass.expr:
attr = klass._metaclass.expr
while not isinstance(attr, nodes.Name):
attr = attr.expr
name = attr.name
elif metaclass:
name = metaclass.root().name
found = False
name = METACLASS_NAME_TRANSFORMS.get(name, name)
if name:
# check enclosing scopes starting from most local
for scope_locals, _, _, _ in self._to_consume[::-1]:
found_nodes = scope_locals.get(name, [])
for found_node in found_nodes:
if found_node.lineno <= klass.lineno:
consumed.append((scope_locals, name))
found = True
break
# Check parent scope
nodes_in_parent_scope = parent_node.locals.get(name, [])
for found_node_parent in nodes_in_parent_scope:
if found_node_parent.lineno <= klass.lineno:
found = True
break
if (
not found
and not metaclass
and not (
name in nodes.Module.scope_attrs
or utils.is_builtin(name)
or name in self.config.additional_builtins
)
):
self.add_message("undefined-variable", node=klass, args=(name,))
return consumed
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(VariablesChecker(linter))
| 1 | 19,142 | I don't understand how this work, could you explain ? | PyCQA-pylint | py |
@@ -183,7 +183,7 @@ analyze_callee_regs_usage(dcontext_t *dcontext, callee_info_t *ci)
/* XXX implement bitset for optimisation */
memset(ci->reg_used, 0, sizeof(bool) * NUM_GP_REGS);
ci->num_simd_used = 0;
- memset(ci->simd_used, 0, sizeof(bool) * NUM_SIMD_REGS);
+ memset(ci->simd_used, 0, sizeof(bool) * MCTX_NUM_SIMD_SLOTS);
ci->write_flags = false;
num_regparm = MIN(ci->num_args, NUM_REGPARM); | 1 | /* **********************************************************
* Copyright (c) 2016-2018 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* file "clean_call_opt.c" */
#include "../globals.h"
#include "arch.h"
#include "instr_create.h"
#include "instrument.h" /* instrlist_meta_preinsert */
#include "../clean_call_opt.h"
#include "disassemble.h"
#ifdef CLIENT_INTERFACE /* around whole file */
/* Shorten code generation lines. */
# define PRE instrlist_meta_preinsert
# define OPREG opnd_create_reg
/* For fast recognition we do not check the instructions operand by operand.
* Instead we test the encoding directly.
*/
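/* Illustrative example: instr_is_push_fp_and_lr() below recognises
 * "stp x29, x30, [sp, #N]!" for any frame size N by clearing the variable
 * immediate bits, i.e. (enc & STP_LDP_ENC_MASK) == PUSH_FP_LR_ENC.
 */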
/* remove variable bits in the encoding */
# define STP_LDP_ENC_MASK 0x7fc07fff
# define STR_LDR_ENC_MASK 0xbfc003ff
# define MOV_STK_ENC_MASK 0x7f0003ff
# define STP_LDP_REG_MASK 0xffff83e0
# define STR_LDR_REG_MASK 0xffffffe0
/* stp x29, x30, [sp, #frame_size]! */
# define PUSH_FP_LR_ENC 0x29807bfd
/* ldp x29, x30, [sp], #frame_size */
# define POP_FP_LR_ENC 0x28c07bfd
/* add sp, sp, #frame_size */
# define ADD_SP_ENC 0x110003ff
/* sub sp, sp, #frame_size */
# define SUB_SP_ENC 0x510003ff
/* mov x29, sp */
# define MOV_X29_SP_ENC 0x910003fd
/* stp xx, xx, [sp, #offset] */
# define STP_SP_ENC 0x290003e0
/* ldp xx, xx, [sp, #offset] */
# define LDP_SP_ENC 0x294003e0
/* str xx, [sp, #offset] */
# define STR_SP_ENC 0xb90003e0
/* ldr xx, [sp, #offset] */
# define LDR_SP_ENC 0xb94003e0
static inline bool
instr_is_push_fp_and_lr(instr_t *instr)
{
uint enc = *(uint *)instr->bytes;
return (enc & STP_LDP_ENC_MASK) == PUSH_FP_LR_ENC;
}
static inline bool
instr_is_pop_fp_and_lr(instr_t *instr)
{
uint enc = *(uint *)instr->bytes;
return (enc & STP_LDP_ENC_MASK) == POP_FP_LR_ENC;
}
static inline bool
instr_is_move_frame_ptr(instr_t *instr)
{
uint enc = *(uint *)instr->bytes;
return enc == MOV_X29_SP_ENC;
}
static inline bool
instr_is_add_stk_ptr(instr_t *instr)
{
uint enc = *(uint *)instr->bytes;
return (enc & MOV_STK_ENC_MASK) == ADD_SP_ENC;
}
static inline bool
instr_is_sub_stk_ptr(instr_t *instr)
{
uint enc = *(uint *)instr->bytes;
return (enc & MOV_STK_ENC_MASK) == SUB_SP_ENC;
}
static inline bool
instr_is_push_reg_pair(instr_t *instr, reg_id_t *reg1, reg_id_t *reg2)
{
uint enc = *(uint *)instr->bytes;
enc = enc & STP_LDP_ENC_MASK;
if ((enc & STP_LDP_REG_MASK) != STP_SP_ENC)
return false;
*reg1 = (reg_id_t)(enc & 31) + DR_REG_START_GPR;
*reg2 = (reg_id_t)(enc >> 10 & 31) + DR_REG_START_GPR;
return true;
}
static inline bool
instr_is_pop_reg_pair(instr_t *instr, reg_id_t *reg1, reg_id_t *reg2)
{
uint enc = *(uint *)instr->bytes;
enc = enc & STP_LDP_ENC_MASK;
if ((enc & STP_LDP_REG_MASK) != LDP_SP_ENC)
return false;
*reg1 = (reg_id_t)(enc & 31) + DR_REG_START_GPR;
*reg2 = (reg_id_t)(enc >> 10 & 31) + DR_REG_START_GPR;
return true;
}
static inline bool
instr_is_push_reg(instr_t *instr, reg_id_t *reg)
{
uint enc = *(uint *)instr->bytes;
enc = enc & STR_LDR_ENC_MASK;
if ((enc & STR_LDR_REG_MASK) != STR_SP_ENC)
return false;
*reg = (reg_id_t)(enc & 31) + DR_REG_START_GPR;
return true;
}
static inline bool
instr_is_pop_reg(instr_t *instr, reg_id_t *reg)
{
uint enc = *(uint *)instr->bytes;
enc = enc & STR_LDR_ENC_MASK;
if ((enc & STR_LDR_REG_MASK) != LDR_SP_ENC)
return false;
*reg = (reg_id_t)(enc & 31) + DR_REG_START_GPR;
return true;
}
static inline reg_id_t
find_nzcv_spill_reg(callee_info_t *ci)
{
int i;
reg_id_t spill_reg = DR_REG_INVALID;
for (i = NUM_GP_REGS - 2; i >= 0; i--) {
reg_id_t reg = DR_REG_START_GPR + (reg_id_t)i;
ASSERT(reg != DR_REG_XSP && "hit SP starting at x30");
if (reg == ci->spill_reg || ci->reg_used[i])
continue;
spill_reg = reg;
break;
}
ASSERT(spill_reg != DR_REG_INVALID);
return spill_reg;
}
void
analyze_callee_regs_usage(dcontext_t *dcontext, callee_info_t *ci)
{
instrlist_t *ilist = ci->ilist;
instr_t *instr;
uint i, num_regparm;
/* XXX implement bitset for optimisation */
memset(ci->reg_used, 0, sizeof(bool) * NUM_GP_REGS);
ci->num_simd_used = 0;
memset(ci->simd_used, 0, sizeof(bool) * NUM_SIMD_REGS);
ci->write_flags = false;
num_regparm = MIN(ci->num_args, NUM_REGPARM);
for (i = 0; i < num_regparm; i++) {
reg_id_t reg = regparms[i];
if (!ci->reg_used[reg - DR_REG_START_GPR]) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: callee " PFX " uses REG %s for arg passing\n", ci->start,
reg_names[reg]);
ci->reg_used[reg - DR_REG_START_GPR] = true;
callee_info_reserve_slot(ci, SLOT_REG, reg);
}
}
for (instr = instrlist_first(ilist); instr != NULL; instr = instr_get_next(instr)) {
/* General purpose registers */
for (i = 0; i < NUM_GP_REGS; i++) {
reg_id_t reg = DR_REG_START_GPR + (reg_id_t)i;
if (!ci->reg_used[i] && instr_uses_reg(instr, reg)) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: callee " PFX " uses REG %s at " PFX "\n", ci->start,
reg_names[reg], instr_get_app_pc(instr));
ci->reg_used[i] = true;
callee_info_reserve_slot(ci, SLOT_REG, reg);
}
}
/* SIMD register usage */
for (i = 0; i < NUM_SIMD_REGS; i++) {
if (!ci->simd_used[i] && instr_uses_reg(instr, (DR_REG_Q0 + (reg_id_t)i))) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: callee " PFX " uses VREG%d at " PFX "\n", ci->start, i,
instr_get_app_pc(instr));
ci->simd_used[i] = true;
ci->num_simd_used++;
}
}
/* NZCV register usage */
if (!ci->write_flags &&
TESTANY(EFLAGS_WRITE_ARITH,
instr_get_arith_flags(instr, DR_QUERY_INCLUDE_ALL))) {
LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: callee " PFX " updates aflags\n",
ci->start);
ci->write_flags = true;
}
}
if (ci->write_flags) {
callee_info_reserve_slot(ci, SLOT_FLAGS, 0);
}
}
/* We use stp/ldp/str/ldr [sp, #imm] pattern to detect callee saved registers,
* and assume that the code later won't change those saved value
* on the stack.
*/
void
analyze_callee_save_reg(dcontext_t *dcontext, callee_info_t *ci)
{
instrlist_t *ilist = ci->ilist;
instr_t *top, *bot, *instr;
reg_id_t reg1, reg2;
/* pointers to instructions of interest */
instr_t *enter = NULL, *leave = NULL;
ci->num_callee_save_regs = 0;
top = instrlist_first(ilist);
bot = instrlist_last(ilist);
/* zero or one instruction only, no callee save */
if (top == bot)
return;
/* Stack frame analysis
* A typical function (fewer than 8 arguments) has the following form:
* (a) stp x29, x30, [sp, #-frame_size]!
* (b) mov x29, sp
* (c) stp x19, x20, [sp, #callee_save_offset]
* (c) str x21, [sp, #callee_save_offset+8]
* ...
* (c) ldp x19, x20, [sp, #callee_save_offset]
* (c) ldr x21, [sp, #callee_save_offset+8]
* (a) ldp x29, x30, [sp], #frame_size
* ret
* Pair (a) appears when the callee calls another function.
* If the callee is a leaf function, pair (a) typically has the following form:
* (a) sub, sp, sp, #frame_size
* (a) add, sp, sp, #frame_size
* If (b) is found, x29 is used as the frame pointer.
* Pair (c) may have two forms, using stp/ldp for register pairs
* or str/ldr for a single callee-saved register.
*/
/* Check for pair (a) */
for (instr = top; instr != bot; instr = instr_get_next(instr)) {
if (instr->bytes == NULL)
continue;
if (instr_is_push_fp_and_lr(instr) || instr_is_sub_stk_ptr(instr)) {
enter = instr;
break;
}
}
if (enter != NULL) {
for (instr = bot; instr != enter; instr = instr_get_prev(instr)) {
if (!instr->bytes)
continue;
if (instr_is_pop_fp_and_lr(instr) || instr_is_add_stk_ptr(instr)) {
leave = instr;
break;
}
}
}
/* Check for (b) */
ci->standard_fp = false;
if (enter != NULL && leave != NULL &&
(ci->bwd_tgt == NULL || instr_get_app_pc(enter) < ci->bwd_tgt) &&
(ci->fwd_tgt == NULL || instr_get_app_pc(leave) >= ci->fwd_tgt)) {
for (instr = instr_get_next(enter); instr != leave;
instr = instr_get_next(instr)) {
if (instr_is_move_frame_ptr(instr)) {
ci->standard_fp = true;
/* Remove this instruction. */
instrlist_remove(ilist, instr);
instr_destroy(GLOBAL_DCONTEXT, instr);
break;
}
}
if (ci->standard_fp) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: callee " PFX " use X29 as frame pointer\n", ci->start);
}
/* remove pair (a) */
instrlist_remove(ilist, enter);
instrlist_remove(ilist, leave);
instr_destroy(GLOBAL_DCONTEXT, enter);
instr_destroy(GLOBAL_DCONTEXT, leave);
top = instrlist_first(ilist);
bot = instrlist_last(ilist);
}
/* Check for (c): callee-saved registers */
while (top != NULL && bot != NULL) {
/* if not in the first/last bb, break */
if ((ci->bwd_tgt != NULL && instr_get_app_pc(top) >= ci->bwd_tgt) ||
(ci->fwd_tgt != NULL && instr_get_app_pc(bot) < ci->fwd_tgt) ||
instr_is_cti(top) || instr_is_cti(bot))
break;
        if (instr_is_push_reg_pair(top, &reg1, &reg2)) {
            /* If a register-pair save is found, check whether the last
             * instruction is the corresponding load.
             */
reg_id_t reg1_c, reg2_c;
            if (instr_is_pop_reg_pair(bot, &reg1_c, &reg2_c) && reg1 == reg1_c &&
reg2 == reg2_c) {
/* found a save/restore pair */
ci->callee_save_regs[reg1] = true;
ci->callee_save_regs[reg2] = true;
ci->num_callee_save_regs += 2;
/* remove & destroy the pairs */
instrlist_remove(ilist, top);
instr_destroy(GLOBAL_DCONTEXT, top);
instrlist_remove(ilist, bot);
instr_destroy(GLOBAL_DCONTEXT, bot);
/* get next pair */
top = instrlist_first(ilist);
bot = instrlist_last(ilist);
} else
break;
        } else if (instr_is_push_reg(top, &reg1)) {
            /* If a single-register save is found, check whether the last
             * instruction is the corresponding restore.
             */
reg_id_t reg1_c;
            if (instr_is_pop_reg(bot, &reg1_c) && reg1 == reg1_c) {
/* found a save/restore pair */
ci->callee_save_regs[reg1] = true;
ci->num_callee_save_regs += 1;
/* remove & destroy the pairs */
instrlist_remove(ilist, top);
instr_destroy(GLOBAL_DCONTEXT, top);
instrlist_remove(ilist, bot);
instr_destroy(GLOBAL_DCONTEXT, bot);
/* get next pair */
top = instrlist_first(ilist);
bot = instrlist_last(ilist);
} else
break;
} else
break;
}
}
void
analyze_callee_tls(dcontext_t *dcontext, callee_info_t *ci)
{
instr_t *instr;
ci->tls_used = false;
for (instr = instrlist_first(ci->ilist); instr != NULL;
instr = instr_get_next(instr)) {
if (instr_reads_thread_register(instr) || instr_writes_thread_register(instr)) {
ci->tls_used = true;
break;
}
}
if (ci->tls_used) {
LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: callee " PFX " accesses far memory\n",
ci->start);
}
}
app_pc
check_callee_instr_level2(dcontext_t *dcontext, callee_info_t *ci, app_pc next_pc,
app_pc cur_pc, app_pc tgt_pc)
{
/* FIXME i#2796: For opt level greater than 1, we abort. */
return NULL;
}
bool
check_callee_ilist_inline(dcontext_t *dcontext, callee_info_t *ci)
{
instr_t *instr, *next_instr;
bool opt_inline = true;
/* Now we need scan instructions in the list and check if they all are
* safe to inline.
*/
ci->has_locals = false;
for (instr = instrlist_first(ci->ilist); instr != NULL; instr = next_instr) {
next_instr = instr_get_next(instr);
DOLOG(3, LOG_CLEANCALL,
{ disassemble_with_bytes(dcontext, instr_get_app_pc(instr), THREAD); });
if (ci->standard_fp &&
instr_writes_to_reg(instr, DR_REG_X29, DR_QUERY_INCLUDE_ALL)) {
/* X29 must not be changed if X29 is used for frame pointer. */
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee " PFX " cannot be inlined: X29 is updated.\n",
ci->start);
opt_inline = false;
break;
} else if (instr_writes_to_reg(instr, DR_REG_XSP, DR_QUERY_INCLUDE_ALL)) {
/* SP must not be changed. */
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee " PFX " cannot be inlined: XSP is updated.\n",
ci->start);
opt_inline = false;
break;
}
/* For now, any accesses to SP or X29, if it is used as frame pointer,
* prevent inlining.
* FIXME i#2796: Some access to SP or X29 can be re-written.
*/
if ((instr_reg_in_src(instr, DR_REG_XSP) ||
(instr_reg_in_src(instr, DR_REG_X29) && ci->standard_fp)) &&
(instr_reads_memory(instr) || instr_writes_memory(instr))) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee " PFX " cannot be inlined: SP or X29 accessed.\n",
ci->start);
opt_inline = false;
break;
}
}
ASSERT(instr == NULL || opt_inline == false);
return opt_inline;
}
void
analyze_clean_call_aflags(dcontext_t *dcontext, clean_call_info_t *cci, instr_t *where)
{
/* FIXME i#2796: NYI on AArch64
* Non-essential for cleancall_opt=1 optimizations.
*/
}
void
insert_inline_reg_save(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
instr_t *where, opnd_t *args)
{
callee_info_t *ci = cci->callee_info;
/* Don't spill anything if we don't have to. */
if (cci->num_regs_skip == NUM_GP_REGS && cci->skip_save_flags && !ci->has_locals) {
return;
}
/* Spill a register to TLS and point it at our unprotected_context_t.*/
PRE(ilist, where, instr_create_save_to_tls(dcontext, ci->spill_reg, TLS_REG2_SLOT));
insert_get_mcontext_base(dcontext, ilist, where, ci->spill_reg);
insert_save_inline_registers(dcontext, ilist, where, cci->reg_skip, DR_REG_START_GPR,
true, (void *)ci);
/* Save nzcv */
if (!cci->skip_save_flags && ci->write_flags) {
reg_id_t nzcv_spill_reg = find_nzcv_spill_reg(ci);
PRE(ilist, where,
XINST_CREATE_store(dcontext, callee_info_slot_opnd(ci, SLOT_FLAGS, 0),
opnd_create_reg(nzcv_spill_reg)));
dr_save_arith_flags_to_reg(dcontext, ilist, where, nzcv_spill_reg);
}
/* FIXME i#2796: Save fpcr, fpsr. */
}
void
insert_inline_reg_restore(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *where)
{
callee_info_t *ci = cci->callee_info;
/* Don't restore regs if we don't have to. */
if (cci->num_regs_skip == NUM_GP_REGS && cci->skip_save_flags && !ci->has_locals) {
return;
}
/* Restore nzcv before regs */
if (!cci->skip_save_flags && ci->write_flags) {
reg_id_t nzcv_spill_reg = find_nzcv_spill_reg(ci);
dr_restore_arith_flags_from_reg(dcontext, ilist, where, nzcv_spill_reg);
PRE(ilist, where,
XINST_CREATE_load(dcontext, opnd_create_reg(nzcv_spill_reg),
callee_info_slot_opnd(ci, SLOT_FLAGS, 0)));
}
insert_restore_inline_registers(dcontext, ilist, where, cci->reg_skip, DR_REG_X0,
true, (void *)ci);
/* Restore reg used for unprotected_context_t pointer. */
PRE(ilist, where,
instr_create_restore_from_tls(dcontext, ci->spill_reg, TLS_REG2_SLOT));
/* FIXME i#2796: Restore fpcr, fpsr. */
}
void
insert_inline_arg_setup(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
instr_t *where, opnd_t *args)
{
callee_info_t *ci = cci->callee_info;
reg_id_t regparm = regparms[0];
opnd_t arg;
if (cci->num_args == 0)
return;
/* If the arg is un-referenced, don't set it up. This is actually necessary
* for correctness because we will not have spilled regparm[0].
*/
if (!ci->reg_used[regparms[0] - DR_REG_START_GPR]) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: callee " PFX " doesn't read arg, skipping arg setup.\n",
ci->start);
return;
}
ASSERT(cci->num_args == 1);
arg = args[0];
if (opnd_uses_reg(arg, ci->spill_reg)) {
/* FIXME i#2796: Try to pass arg via spill register, like on X86. */
ASSERT_NOT_IMPLEMENTED(false);
}
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: inlining clean call " PFX ", passing arg via reg %s.\n", ci->start,
reg_names[regparm]);
if (opnd_is_immed_int(arg)) {
insert_mov_immed_ptrsz(dcontext, opnd_get_immed_int(arg),
opnd_create_reg(regparm), ilist, where, NULL, NULL);
} else {
/* FIXME i#2796: Implement passing additional argument types. */
ASSERT_NOT_IMPLEMENTED(false);
}
}
#endif /* CLIENT_INTERFACE */
| 1 | 15,569 | The abbreviation for "context" used extensively inside DR is "cxt", not "ctx", so s/MCTX/MCXT/. | DynamoRIO-dynamorio | c |
@@ -13,5 +13,5 @@ return [
*/
'failed' => 'یہ تفصیلات ہمارے ریکارڈ سے مطابقت نہیں رکھتیں۔',
- 'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی :seconds سیکنڈ میں دوبارہ کوشش کریں۔',
+ 'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی کچھ سیکنڈ میں دوبارہ کوشش کریں۔',
]; | 1 | <?php
return [
/*
|--------------------------------------------------------------------------
| Authentication Language Lines
|--------------------------------------------------------------------------
|
| The following language lines are used during authentication for various
| messages that we need to display to the user. You are free to modify
| these language lines according to your application's requirements.
|
*/
'failed' => 'یہ تفصیلات ہمارے ریکارڈ سے مطابقت نہیں رکھتیں۔',
'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی :seconds سیکنڈ میں دوبارہ کوشش کریں۔',
];
| 1 | 6,990 | here is `:seconds` missing again | Laravel-Lang-lang | php |
@@ -40,6 +40,7 @@ public class WebSocketConfiguration {
private String authenticationCredentialsFile;
private List<String> hostsAllowlist = Arrays.asList("localhost", "127.0.0.1");
private File authenticationPublicKeyFile;
+ private String authenticationAlgorithm = null;
private long timeoutSec;
private int maxActiveConnections;
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.websocket;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_RPC_APIS;
import org.hyperledger.besu.ethereum.api.handlers.TimeoutOptions;
import java.io.File;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import com.google.common.base.MoreObjects;
public class WebSocketConfiguration {
public static final String DEFAULT_WEBSOCKET_HOST = "127.0.0.1";
public static final int DEFAULT_WEBSOCKET_PORT = 8546;
public static final int DEFAULT_MAX_ACTIVE_CONNECTIONS = 80;
private boolean enabled;
private int port;
private String host;
private List<String> rpcApis;
private boolean authenticationEnabled = false;
private String authenticationCredentialsFile;
private List<String> hostsAllowlist = Arrays.asList("localhost", "127.0.0.1");
private File authenticationPublicKeyFile;
private long timeoutSec;
private int maxActiveConnections;
public static WebSocketConfiguration createDefault() {
final WebSocketConfiguration config = new WebSocketConfiguration();
config.setEnabled(false);
config.setHost(DEFAULT_WEBSOCKET_HOST);
config.setPort(DEFAULT_WEBSOCKET_PORT);
config.setRpcApis(DEFAULT_RPC_APIS);
config.setTimeoutSec(TimeoutOptions.defaultOptions().getTimeoutSeconds());
config.setMaxActiveConnections(DEFAULT_MAX_ACTIVE_CONNECTIONS);
return config;
}
private WebSocketConfiguration() {}
public boolean isEnabled() {
return enabled;
}
public void setEnabled(final boolean enabled) {
this.enabled = enabled;
}
public void setHost(final String host) {
this.host = host;
}
public String getHost() {
return host;
}
public void setPort(final int port) {
this.port = port;
}
public int getPort() {
return port;
}
public Collection<String> getRpcApis() {
return rpcApis;
}
public void setRpcApis(final List<String> rpcApis) {
this.rpcApis = rpcApis;
}
public boolean isAuthenticationEnabled() {
return authenticationEnabled;
}
public void setAuthenticationEnabled(final boolean authenticationEnabled) {
this.authenticationEnabled = authenticationEnabled;
}
public void setAuthenticationCredentialsFile(final String authenticationCredentialsFile) {
this.authenticationCredentialsFile = authenticationCredentialsFile;
}
public String getAuthenticationCredentialsFile() {
return authenticationCredentialsFile;
}
public void setHostsAllowlist(final List<String> hostsAllowlist) {
this.hostsAllowlist = hostsAllowlist;
}
public Collection<String> getHostsAllowlist() {
return Collections.unmodifiableCollection(this.hostsAllowlist);
}
public File getAuthenticationPublicKeyFile() {
return authenticationPublicKeyFile;
}
public void setAuthenticationPublicKeyFile(final File authenticationPublicKeyFile) {
this.authenticationPublicKeyFile = authenticationPublicKeyFile;
}
public long getTimeoutSec() {
return timeoutSec;
}
public void setTimeoutSec(final long timeoutSec) {
this.timeoutSec = timeoutSec;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final WebSocketConfiguration that = (WebSocketConfiguration) o;
return enabled == that.enabled
&& port == that.port
&& authenticationEnabled == that.authenticationEnabled
&& Objects.equals(host, that.host)
&& Objects.equals(rpcApis, that.rpcApis)
&& Objects.equals(authenticationCredentialsFile, that.authenticationCredentialsFile)
&& Objects.equals(hostsAllowlist, that.hostsAllowlist)
&& Objects.equals(authenticationPublicKeyFile, that.authenticationPublicKeyFile)
&& timeoutSec == that.timeoutSec;
}
@Override
public int hashCode() {
return Objects.hash(
enabled,
port,
host,
rpcApis,
authenticationEnabled,
authenticationCredentialsFile,
hostsAllowlist,
authenticationPublicKeyFile,
timeoutSec);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("enabled", enabled)
.add("port", port)
.add("host", host)
.add("rpcApis", rpcApis)
.add("authenticationEnabled", authenticationEnabled)
.add("authenticationCredentialsFile", authenticationCredentialsFile)
.add("hostsAllowlist", hostsAllowlist)
.add("authenticationPublicKeyFile", authenticationPublicKeyFile)
.add("timeoutSec", timeoutSec)
.toString();
}
public int getMaxActiveConnections() {
return maxActiveConnections;
}
public void setMaxActiveConnections(final int maxActiveConnections) {
this.maxActiveConnections = maxActiveConnections;
}
}
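The reviewer note recorded for this row asks for authenticationAlgorithm to get a real default instead of null. As a minimal, hypothetical sketch of that pattern only (the class name and the "RS256" default are assumptions for illustration, not taken from the Besu codebase), the field could be declared and exposed in the same getter/setter style as the other settings:

// Illustrative sketch: a non-null default for the authentication algorithm setting,
// following the accessor style used by WebSocketConfiguration.
// The chosen default value is an assumption, not Besu's actual behaviour.
public class WebSocketAuthAlgorithmSketch {
  private String authenticationAlgorithm = "RS256"; // assumed default instead of null

  public String getAuthenticationAlgorithm() {
    return authenticationAlgorithm;
  }

  public void setAuthenticationAlgorithm(final String authenticationAlgorithm) {
    this.authenticationAlgorithm = authenticationAlgorithm;
  }
}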
| 1 | 26,569 | initializing to null makes me uncomfortable. let's have a default value | hyperledger-besu | java |
@@ -1129,6 +1129,12 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Http
}
else if (ch == BytePercentage)
{
+ if (pathStart == -1)
+ {
+ // Empty path is illegal
+ RejectRequestLine(start, end);
+ }
+
needDecode = true;
}
| 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.IO.Pipelines;
using System.IO.Pipelines.Text.Primitives;
using System.Linq;
using System.Net;
using System.Runtime.CompilerServices;
using System.Text;
using System.Text.Encodings.Web.Utf8;
using System.Text.Utf8;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Adapter;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Infrastructure;
using Microsoft.Extensions.Internal;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Primitives;
// ReSharper disable AccessToModifiedClosure
namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Http
{
public abstract partial class Frame : IFrameControl
{
// byte types don't have a data type annotation so we pre-cast them; to avoid in-place casts
private const byte ByteCR = (byte)'\r';
private const byte ByteLF = (byte)'\n';
private const byte ByteColon = (byte)':';
private const byte ByteSpace = (byte)' ';
private const byte ByteTab = (byte)'\t';
private const byte ByteQuestionMark = (byte)'?';
private const byte BytePercentage = (byte)'%';
private static readonly ArraySegment<byte> _endChunkedResponseBytes = CreateAsciiByteArraySegment("0\r\n\r\n");
private static readonly ArraySegment<byte> _continueBytes = CreateAsciiByteArraySegment("HTTP/1.1 100 Continue\r\n\r\n");
private static readonly byte[] _bytesConnectionClose = Encoding.ASCII.GetBytes("\r\nConnection: close");
private static readonly byte[] _bytesConnectionKeepAlive = Encoding.ASCII.GetBytes("\r\nConnection: keep-alive");
private static readonly byte[] _bytesTransferEncodingChunked = Encoding.ASCII.GetBytes("\r\nTransfer-Encoding: chunked");
private static readonly byte[] _bytesHttpVersion11 = Encoding.ASCII.GetBytes("HTTP/1.1 ");
private static readonly byte[] _bytesEndHeaders = Encoding.ASCII.GetBytes("\r\n\r\n");
private static readonly byte[] _bytesServer = Encoding.ASCII.GetBytes("\r\nServer: Kestrel");
private readonly object _onStartingSync = new Object();
private readonly object _onCompletedSync = new Object();
private Streams _frameStreams;
protected Stack<KeyValuePair<Func<object, Task>, object>> _onStarting;
protected Stack<KeyValuePair<Func<object, Task>, object>> _onCompleted;
private TaskCompletionSource<object> _frameStartedTcs = new TaskCompletionSource<object>();
private Task _requestProcessingTask;
protected volatile bool _requestProcessingStopping; // volatile, see: https://msdn.microsoft.com/en-us/library/x13ttww7.aspx
protected int _requestAborted;
private CancellationTokenSource _abortedCts;
private CancellationToken? _manuallySetRequestAbortToken;
private RequestProcessingStatus _requestProcessingStatus;
protected bool _keepAlive;
protected bool _upgrade;
private bool _canHaveBody;
private bool _autoChunk;
protected Exception _applicationException;
private BadHttpRequestException _requestRejectedException;
protected HttpVersion _httpVersion;
private readonly string _pathBase;
private int _remainingRequestHeadersBytesAllowed;
private int _requestHeadersParsed;
protected readonly long _keepAliveMilliseconds;
private readonly long _requestHeadersTimeoutMilliseconds;
protected long _responseBytesWritten;
public Frame(ConnectionContext context)
{
ConnectionContext = context;
Input = context.Input;
Output = context.Output;
ServerOptions = context.ListenerContext.ServiceContext.ServerOptions;
_pathBase = context.ListenerContext.ListenOptions.PathBase;
FrameControl = this;
_keepAliveMilliseconds = (long)ServerOptions.Limits.KeepAliveTimeout.TotalMilliseconds;
_requestHeadersTimeoutMilliseconds = (long)ServerOptions.Limits.RequestHeadersTimeout.TotalMilliseconds;
}
public ConnectionContext ConnectionContext { get; }
public IPipe Input { get; set; }
public ISocketOutput Output { get; set; }
public IEnumerable<IAdaptedConnection> AdaptedConnections { get; set; }
protected IConnectionControl ConnectionControl => ConnectionContext.ConnectionControl;
protected IKestrelTrace Log => ConnectionContext.ListenerContext.ServiceContext.Log;
private DateHeaderValueManager DateHeaderValueManager => ConnectionContext.ListenerContext.ServiceContext.DateHeaderValueManager;
// Hold direct reference to ServerOptions since this is used very often in the request processing path
private KestrelServerOptions ServerOptions { get; }
private IPEndPoint LocalEndPoint => ConnectionContext.LocalEndPoint;
private IPEndPoint RemoteEndPoint => ConnectionContext.RemoteEndPoint;
protected string ConnectionId => ConnectionContext.ConnectionId;
public string ConnectionIdFeature { get; set; }
public IPAddress RemoteIpAddress { get; set; }
public int RemotePort { get; set; }
public IPAddress LocalIpAddress { get; set; }
public int LocalPort { get; set; }
public string Scheme { get; set; }
public string Method { get; set; }
public string PathBase { get; set; }
public string Path { get; set; }
public string QueryString { get; set; }
public string RawTarget { get; set; }
public string HttpVersion
{
get
{
if (_httpVersion == Http.HttpVersion.Http11)
{
return "HTTP/1.1";
}
if (_httpVersion == Http.HttpVersion.Http10)
{
return "HTTP/1.0";
}
return string.Empty;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
set
{
// GetKnownVersion returns versions which ReferenceEquals interned string
// As most common path, check for this only in fast-path and inline
if (ReferenceEquals(value, "HTTP/1.1"))
{
_httpVersion = Http.HttpVersion.Http11;
}
else if (ReferenceEquals(value, "HTTP/1.0"))
{
_httpVersion = Http.HttpVersion.Http10;
}
else
{
HttpVersionSetSlow(value);
}
}
}
[MethodImpl(MethodImplOptions.NoInlining)]
private void HttpVersionSetSlow(string value)
{
if (value == "HTTP/1.1")
{
_httpVersion = Http.HttpVersion.Http11;
}
else if (value == "HTTP/1.0")
{
_httpVersion = Http.HttpVersion.Http10;
}
else
{
_httpVersion = Http.HttpVersion.Unset;
}
}
public IHeaderDictionary RequestHeaders { get; set; }
public Stream RequestBody { get; set; }
private int _statusCode;
public int StatusCode
{
get
{
return _statusCode;
}
set
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(StatusCode));
}
_statusCode = value;
}
}
private string _reasonPhrase;
public string ReasonPhrase
{
get
{
return _reasonPhrase;
}
set
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(ReasonPhrase));
}
_reasonPhrase = value;
}
}
public IHeaderDictionary ResponseHeaders { get; set; }
public Stream ResponseBody { get; set; }
public Stream DuplexStream { get; set; }
public Task FrameStartedTask => _frameStartedTcs.Task;
public CancellationToken RequestAborted
{
get
{
// If a request abort token was previously explicitly set, return it.
if (_manuallySetRequestAbortToken.HasValue)
{
return _manuallySetRequestAbortToken.Value;
}
// Otherwise, get the abort CTS. If we have one, which would mean that someone previously
// asked for the RequestAborted token, simply return its token. If we don't,
// check to see whether we've already aborted, in which case just return an
// already canceled token. Finally, force a source into existence if we still
// don't have one, and return its token.
var cts = _abortedCts;
return
cts != null ? cts.Token :
(Volatile.Read(ref _requestAborted) == 1) ? new CancellationToken(true) :
RequestAbortedSource.Token;
}
set
{
// Set an abort token, overriding one we create internally. This setter and associated
// field exist purely to support IHttpRequestLifetimeFeature.set_RequestAborted.
_manuallySetRequestAbortToken = value;
}
}
private CancellationTokenSource RequestAbortedSource
{
get
{
// Get the abort token, lazily-initializing it if necessary.
// Make sure it's canceled if an abort request already came in.
// EnsureInitialized can return null since _abortedCts is reset to null
// after it's already been initialized to a non-null value.
// If EnsureInitialized does return null, this property was accessed between
// requests so it's safe to return an ephemeral CancellationTokenSource.
var cts = LazyInitializer.EnsureInitialized(ref _abortedCts, () => new CancellationTokenSource())
?? new CancellationTokenSource();
if (Volatile.Read(ref _requestAborted) == 1)
{
cts.Cancel();
}
return cts;
}
}
public bool HasResponseStarted => _requestProcessingStatus == RequestProcessingStatus.ResponseStarted;
protected FrameRequestHeaders FrameRequestHeaders { get; private set; }
protected FrameResponseHeaders FrameResponseHeaders { get; private set; }
public void InitializeHeaders()
{
if (FrameRequestHeaders == null)
{
FrameRequestHeaders = new FrameRequestHeaders();
}
RequestHeaders = FrameRequestHeaders;
if (FrameResponseHeaders == null)
{
FrameResponseHeaders = new FrameResponseHeaders();
}
ResponseHeaders = FrameResponseHeaders;
}
public void InitializeStreams(MessageBody messageBody)
{
if (_frameStreams == null)
{
_frameStreams = new Streams(this);
}
RequestBody = _frameStreams.RequestBody;
ResponseBody = _frameStreams.ResponseBody;
DuplexStream = _frameStreams.DuplexStream;
_frameStreams.RequestBody.StartAcceptingReads(messageBody);
_frameStreams.ResponseBody.StartAcceptingWrites();
}
public void PauseStreams()
{
_frameStreams.RequestBody.PauseAcceptingReads();
_frameStreams.ResponseBody.PauseAcceptingWrites();
}
public void ResumeStreams()
{
_frameStreams.RequestBody.ResumeAcceptingReads();
_frameStreams.ResponseBody.ResumeAcceptingWrites();
}
public void StopStreams()
{
_frameStreams.RequestBody.StopAcceptingReads();
_frameStreams.ResponseBody.StopAcceptingWrites();
}
public void Reset()
{
FrameRequestHeaders?.Reset();
FrameResponseHeaders?.Reset();
_onStarting = null;
_onCompleted = null;
_requestProcessingStatus = RequestProcessingStatus.RequestPending;
_keepAlive = false;
_autoChunk = false;
_applicationException = null;
ResetFeatureCollection();
Scheme = null;
Method = null;
PathBase = null;
Path = null;
QueryString = null;
_httpVersion = Http.HttpVersion.Unset;
StatusCode = StatusCodes.Status200OK;
ReasonPhrase = null;
RemoteIpAddress = RemoteEndPoint?.Address;
RemotePort = RemoteEndPoint?.Port ?? 0;
LocalIpAddress = LocalEndPoint?.Address;
LocalPort = LocalEndPoint?.Port ?? 0;
ConnectionIdFeature = ConnectionId;
if (AdaptedConnections != null)
{
try
{
foreach (var adaptedConnection in AdaptedConnections)
{
adaptedConnection.PrepareRequest(this);
}
}
catch (Exception ex)
{
Log.LogError(0, ex, $"Uncaught exception from the {nameof(IAdaptedConnection.PrepareRequest)} method of an {nameof(IAdaptedConnection)}.");
}
}
_manuallySetRequestAbortToken = null;
_abortedCts = null;
_remainingRequestHeadersBytesAllowed = ServerOptions.Limits.MaxRequestHeadersTotalSize;
_requestHeadersParsed = 0;
_responseBytesWritten = 0;
}
/// <summary>
/// Called once by Connection class to begin the RequestProcessingAsync loop.
/// </summary>
public void Start()
{
Reset();
_requestProcessingTask = RequestProcessingAsync();
_frameStartedTcs.SetResult(null);
}
/// <summary>
/// Should be called when the server wants to initiate a shutdown. The Task returned will
/// become complete when the RequestProcessingAsync function has exited. It is expected that
/// Stop will be called on all active connections, and Task.WaitAll() will be called on every
/// return value.
/// </summary>
public Task StopAsync()
{
_requestProcessingStopping = true;
Input.Reader.CancelPendingRead();
return _requestProcessingTask ?? TaskCache.CompletedTask;
}
/// <summary>
/// Immediate kill the connection and poison the request and response streams.
/// </summary>
public void Abort(Exception error = null)
{
if (Interlocked.Exchange(ref _requestAborted, 1) == 0)
{
_requestProcessingStopping = true;
_frameStreams?.RequestBody.Abort(error);
_frameStreams?.ResponseBody.Abort();
try
{
ConnectionControl.End(ProduceEndType.SocketDisconnect);
}
catch (Exception ex)
{
Log.LogError(0, ex, "Abort");
}
try
{
RequestAbortedSource.Cancel();
}
catch (Exception ex)
{
Log.LogError(0, ex, "Abort");
}
_abortedCts = null;
}
}
/// <summary>
/// Primary loop which consumes socket input, parses it for protocol framing, and invokes the
/// application delegate for as long as the socket is intended to remain open.
/// The resulting Task from this loop is preserved in a field which is used when the server needs
/// to drain and close all currently active connections.
/// </summary>
public abstract Task RequestProcessingAsync();
public void OnStarting(Func<object, Task> callback, object state)
{
lock (_onStartingSync)
{
if (HasResponseStarted)
{
ThrowResponseAlreadyStartedException(nameof(OnStarting));
}
if (_onStarting == null)
{
_onStarting = new Stack<KeyValuePair<Func<object, Task>, object>>();
}
_onStarting.Push(new KeyValuePair<Func<object, Task>, object>(callback, state));
}
}
public void OnCompleted(Func<object, Task> callback, object state)
{
lock (_onCompletedSync)
{
if (_onCompleted == null)
{
_onCompleted = new Stack<KeyValuePair<Func<object, Task>, object>>();
}
_onCompleted.Push(new KeyValuePair<Func<object, Task>, object>(callback, state));
}
}
protected async Task FireOnStarting()
{
Stack<KeyValuePair<Func<object, Task>, object>> onStarting = null;
lock (_onStartingSync)
{
onStarting = _onStarting;
_onStarting = null;
}
if (onStarting != null)
{
try
{
foreach (var entry in onStarting)
{
await entry.Key.Invoke(entry.Value);
}
}
catch (Exception ex)
{
ReportApplicationError(ex);
}
}
}
protected async Task FireOnCompleted()
{
Stack<KeyValuePair<Func<object, Task>, object>> onCompleted = null;
lock (_onCompletedSync)
{
onCompleted = _onCompleted;
_onCompleted = null;
}
if (onCompleted != null)
{
foreach (var entry in onCompleted)
{
try
{
await entry.Key.Invoke(entry.Value);
}
catch (Exception ex)
{
ReportApplicationError(ex);
}
}
}
}
public void Flush()
{
InitializeResponse(0).GetAwaiter().GetResult();
Output.Flush();
}
public async Task FlushAsync(CancellationToken cancellationToken)
{
await InitializeResponse(0);
await Output.FlushAsync(cancellationToken);
}
public void Write(ArraySegment<byte> data)
{
// For the first write, ensure headers are flushed if Write(Chunked) isn't called.
var firstWrite = !HasResponseStarted;
if (firstWrite)
{
InitializeResponse(data.Count).GetAwaiter().GetResult();
}
else
{
VerifyAndUpdateWrite(data.Count);
}
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
if (firstWrite)
{
Flush();
}
return;
}
WriteChunked(data);
}
else
{
CheckLastWrite();
Output.Write(data);
}
}
else
{
HandleNonBodyResponseWrite();
if (firstWrite)
{
Flush();
}
}
}
public Task WriteAsync(ArraySegment<byte> data, CancellationToken cancellationToken)
{
if (!HasResponseStarted)
{
return WriteAsyncAwaited(data, cancellationToken);
}
VerifyAndUpdateWrite(data.Count);
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
return TaskCache.CompletedTask;
}
return WriteChunkedAsync(data, cancellationToken);
}
else
{
CheckLastWrite();
return Output.WriteAsync(data, cancellationToken: cancellationToken);
}
}
else
{
HandleNonBodyResponseWrite();
return TaskCache.CompletedTask;
}
}
public async Task WriteAsyncAwaited(ArraySegment<byte> data, CancellationToken cancellationToken)
{
await InitializeResponseAwaited(data.Count);
// WriteAsyncAwaited is only called for the first write to the body.
// Ensure headers are flushed if Write(Chunked)Async isn't called.
if (_canHaveBody)
{
if (_autoChunk)
{
if (data.Count == 0)
{
await FlushAsync(cancellationToken);
return;
}
await WriteChunkedAsync(data, cancellationToken);
}
else
{
CheckLastWrite();
await Output.WriteAsync(data, cancellationToken: cancellationToken);
}
}
else
{
HandleNonBodyResponseWrite();
await FlushAsync(cancellationToken);
}
}
private void VerifyAndUpdateWrite(int count)
{
var responseHeaders = FrameResponseHeaders;
if (responseHeaders != null &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten + count > responseHeaders.ContentLength.Value)
{
_keepAlive = false;
throw new InvalidOperationException(
$"Response Content-Length mismatch: too many bytes written ({_responseBytesWritten + count} of {responseHeaders.ContentLength.Value}).");
}
_responseBytesWritten += count;
}
private void CheckLastWrite()
{
var responseHeaders = FrameResponseHeaders;
// Prevent firing request aborted token if this is the last write, to avoid
// aborting the request if the app is still running when the client receives
// the final bytes of the response and gracefully closes the connection.
//
// Called after VerifyAndUpdateWrite(), so _responseBytesWritten has already been updated.
if (responseHeaders != null &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten == responseHeaders.ContentLength.Value)
{
_abortedCts = null;
}
}
protected void VerifyResponseContentLength()
{
var responseHeaders = FrameResponseHeaders;
if (!HttpMethods.IsHead(Method) &&
!responseHeaders.HasTransferEncoding &&
responseHeaders.ContentLength.HasValue &&
_responseBytesWritten < responseHeaders.ContentLength.Value)
{
// We need to close the connection if any bytes were written since the client
// cannot be certain of how many bytes it will receive.
if (_responseBytesWritten > 0)
{
_keepAlive = false;
}
ReportApplicationError(new InvalidOperationException(
$"Response Content-Length mismatch: too few bytes written ({_responseBytesWritten} of {responseHeaders.ContentLength.Value})."));
}
}
private void WriteChunked(ArraySegment<byte> data)
{
Output.Write(data, chunk: true);
}
private Task WriteChunkedAsync(ArraySegment<byte> data, CancellationToken cancellationToken)
{
return Output.WriteAsync(data, chunk: true, cancellationToken: cancellationToken);
}
private Task WriteChunkedResponseSuffix()
{
return Output.WriteAsync(_endChunkedResponseBytes);
}
private static ArraySegment<byte> CreateAsciiByteArraySegment(string text)
{
var bytes = Encoding.ASCII.GetBytes(text);
return new ArraySegment<byte>(bytes);
}
public void ProduceContinue()
{
if (HasResponseStarted)
{
return;
}
StringValues expect;
if (_httpVersion == Http.HttpVersion.Http11 &&
RequestHeaders.TryGetValue("Expect", out expect) &&
(expect.FirstOrDefault() ?? "").Equals("100-continue", StringComparison.OrdinalIgnoreCase))
{
Output.Write(_continueBytes);
}
}
public Task InitializeResponse(int firstWriteByteCount)
{
if (HasResponseStarted)
{
return TaskCache.CompletedTask;
}
if (_onStarting != null)
{
return InitializeResponseAwaited(firstWriteByteCount);
}
if (_applicationException != null)
{
ThrowResponseAbortedException();
}
VerifyAndUpdateWrite(firstWriteByteCount);
ProduceStart(appCompleted: false);
return TaskCache.CompletedTask;
}
private async Task InitializeResponseAwaited(int firstWriteByteCount)
{
await FireOnStarting();
if (_applicationException != null)
{
ThrowResponseAbortedException();
}
VerifyAndUpdateWrite(firstWriteByteCount);
ProduceStart(appCompleted: false);
}
private void ProduceStart(bool appCompleted)
{
if (HasResponseStarted)
{
return;
}
_requestProcessingStatus = RequestProcessingStatus.ResponseStarted;
var statusBytes = ReasonPhrases.ToStatusBytes(StatusCode, ReasonPhrase);
CreateResponseHeader(statusBytes, appCompleted);
}
protected Task TryProduceInvalidRequestResponse()
{
if (_requestRejectedException != null)
{
if (FrameRequestHeaders == null || FrameResponseHeaders == null)
{
InitializeHeaders();
}
return ProduceEnd();
}
return TaskCache.CompletedTask;
}
protected Task ProduceEnd()
{
if (_requestRejectedException != null || _applicationException != null)
{
if (HasResponseStarted)
{
// We can no longer change the response, so we simply close the connection.
_requestProcessingStopping = true;
return TaskCache.CompletedTask;
}
// If the request was rejected, the error state has already been set by SetBadRequestState and
// that should take precedence.
if (_requestRejectedException != null)
{
SetErrorResponseHeaders(statusCode: _requestRejectedException.StatusCode);
}
else
{
// 500 Internal Server Error
SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError);
}
}
if (!HasResponseStarted)
{
return ProduceEndAwaited();
}
return WriteSuffix();
}
private async Task ProduceEndAwaited()
{
ProduceStart(appCompleted: true);
// Force flush
await Output.FlushAsync();
await WriteSuffix();
}
private Task WriteSuffix()
{
// _autoChunk should be checked after we are sure ProduceStart() has been called
// since ProduceStart() may set _autoChunk to true.
if (_autoChunk)
{
return WriteAutoChunkSuffixAwaited();
}
if (_keepAlive)
{
ConnectionControl.End(ProduceEndType.ConnectionKeepAlive);
}
if (HttpMethods.IsHead(Method) && _responseBytesWritten > 0)
{
Log.ConnectionHeadResponseBodyWrite(ConnectionId, _responseBytesWritten);
}
return TaskCache.CompletedTask;
}
private async Task WriteAutoChunkSuffixAwaited()
{
// For the same reason we call CheckLastWrite() in Content-Length responses.
_abortedCts = null;
await WriteChunkedResponseSuffix();
if (_keepAlive)
{
ConnectionControl.End(ProduceEndType.ConnectionKeepAlive);
}
}
private void CreateResponseHeader(
byte[] statusBytes,
bool appCompleted)
{
var responseHeaders = FrameResponseHeaders;
var hasConnection = responseHeaders.HasConnection;
var connectionOptions = FrameHeaders.ParseConnection(responseHeaders.HeaderConnection);
var hasTransferEncoding = responseHeaders.HasTransferEncoding;
var transferCoding = FrameHeaders.GetFinalTransferCoding(responseHeaders.HeaderTransferEncoding);
var end = Output.ProducingStart();
if (_keepAlive && hasConnection)
{
_keepAlive = (connectionOptions & ConnectionOptions.KeepAlive) == ConnectionOptions.KeepAlive;
}
// https://tools.ietf.org/html/rfc7230#section-3.3.1
// If any transfer coding other than
// chunked is applied to a response payload body, the sender MUST either
// apply chunked as the final transfer coding or terminate the message
// by closing the connection.
if (hasTransferEncoding && transferCoding != TransferCoding.Chunked)
{
_keepAlive = false;
}
// Set whether response can have body
_canHaveBody = StatusCanHaveBody(StatusCode) && Method != "HEAD";
// Don't set the Content-Length or Transfer-Encoding headers
// automatically for HEAD requests or 204, 205, 304 responses.
if (_canHaveBody)
{
if (!hasTransferEncoding && !responseHeaders.ContentLength.HasValue)
{
if (appCompleted && StatusCode != StatusCodes.Status101SwitchingProtocols)
{
// Since the app has completed and we are only now generating
// the headers we can safely set the Content-Length to 0.
responseHeaders.ContentLength = 0;
}
else
{
// Note for future reference: never change this to set _autoChunk to true on HTTP/1.0
// connections, even if we were to infer the client supports it because an HTTP/1.0 request
// was received that used chunked encoding. Sending a chunked response to an HTTP/1.0
// client would break compliance with RFC 7230 (section 3.3.1):
//
// A server MUST NOT send a response containing Transfer-Encoding unless the corresponding
// request indicates HTTP/1.1 (or later).
if (_httpVersion == Http.HttpVersion.Http11 && StatusCode != StatusCodes.Status101SwitchingProtocols)
{
_autoChunk = true;
responseHeaders.SetRawTransferEncoding("chunked", _bytesTransferEncodingChunked);
}
else
{
_keepAlive = false;
}
}
}
}
else if (hasTransferEncoding)
{
RejectNonBodyTransferEncodingResponse(appCompleted);
}
responseHeaders.SetReadOnly();
if (!hasConnection)
{
if (!_keepAlive)
{
responseHeaders.SetRawConnection("close", _bytesConnectionClose);
}
else if (_httpVersion == Http.HttpVersion.Http10)
{
responseHeaders.SetRawConnection("keep-alive", _bytesConnectionKeepAlive);
}
}
if (ServerOptions.AddServerHeader && !responseHeaders.HasServer)
{
responseHeaders.SetRawServer(Constants.ServerName, _bytesServer);
}
if (!responseHeaders.HasDate)
{
var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues();
responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes);
}
end.CopyFrom(_bytesHttpVersion11);
end.CopyFrom(statusBytes);
responseHeaders.CopyTo(ref end);
end.CopyFrom(_bytesEndHeaders, 0, _bytesEndHeaders.Length);
Output.ProducingComplete(end);
}
public unsafe bool TakeStartLine(ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined)
{
var start = buffer.Start;
var end = buffer.Start;
var bufferEnd = buffer.End;
examined = buffer.End;
consumed = buffer.Start;
if (_requestProcessingStatus == RequestProcessingStatus.RequestPending)
{
ConnectionControl.ResetTimeout(_requestHeadersTimeoutMilliseconds, TimeoutAction.SendTimeoutResponse);
}
_requestProcessingStatus = RequestProcessingStatus.RequestStarted;
var overLength = false;
if (buffer.Length >= ServerOptions.Limits.MaxRequestLineSize)
{
bufferEnd = buffer.Move(start, ServerOptions.Limits.MaxRequestLineSize);
overLength = true;
}
if (ReadCursorOperations.Seek(start, bufferEnd, out end, ByteLF) == -1)
{
if (overLength)
{
RejectRequest(RequestRejectionReason.RequestLineTooLong);
}
else
{
return false;
}
}
const int stackAllocLimit = 512;
// Move 1 byte past the \n
end = buffer.Move(end, 1);
var startLineBuffer = buffer.Slice(start, end);
Span<byte> span;
if (startLineBuffer.IsSingleSpan)
{
// No copies, directly use the one and only span
span = startLineBuffer.First.Span;
}
else if (startLineBuffer.Length < stackAllocLimit)
{
// Multiple buffers and < stackAllocLimit, copy into a stack buffer
byte* stackBuffer = stackalloc byte[startLineBuffer.Length];
span = new Span<byte>(stackBuffer, startLineBuffer.Length);
startLineBuffer.CopyTo(span);
}
else
{
// We're not a single span here but we can use pooled arrays to avoid allocations in the rare case
span = new Span<byte>(new byte[startLineBuffer.Length]);
startLineBuffer.CopyTo(span);
}
var needDecode = false;
var pathStart = -1;
var queryStart = -1;
var queryEnd = -1;
var pathEnd = -1;
var versionStart = -1;
var queryString = "";
var httpVersion = "";
var method = "";
var state = StartLineState.KnownMethod;
fixed (byte* data = &span.DangerousGetPinnableReference())
{
var length = span.Length;
for (var i = 0; i < length; i++)
{
var ch = data[i];
switch (state)
{
case StartLineState.KnownMethod:
if (span.GetKnownMethod(out method))
{
// Update the index, current char, state and jump directly
// to the next state
i += method.Length + 1;
ch = data[i];
state = StartLineState.Path;
goto case StartLineState.Path;
}
state = StartLineState.UnknownMethod;
goto case StartLineState.UnknownMethod;
case StartLineState.UnknownMethod:
if (ch == ByteSpace)
{
method = span.Slice(0, i).GetAsciiString();
if (method == null)
{
RejectRequestLine(start, end);
}
state = StartLineState.Path;
}
else if (!IsValidTokenChar((char)ch))
{
RejectRequestLine(start, end);
}
break;
case StartLineState.Path:
if (ch == ByteSpace)
{
pathEnd = i;
if (pathStart == -1)
{
// Empty path is illegal
RejectRequestLine(start, end);
}
// No query string found
queryStart = queryEnd = i;
state = StartLineState.KnownVersion;
}
else if (ch == ByteQuestionMark)
{
pathEnd = i;
if (pathStart == -1)
{
// Empty path is illegal
RejectRequestLine(start, end);
}
queryStart = i;
state = StartLineState.QueryString;
}
else if (ch == BytePercentage)
{
needDecode = true;
}
if (pathStart == -1)
{
pathStart = i;
}
break;
case StartLineState.QueryString:
if (ch == ByteSpace)
{
queryEnd = i;
state = StartLineState.KnownVersion;
queryString = span.Slice(queryStart, queryEnd - queryStart).GetAsciiString() ?? string.Empty;
}
break;
case StartLineState.KnownVersion:
// REVIEW: We don't *need* to slice here but it makes the API
// nicer, slicing should be free :)
if (span.Slice(i).GetKnownVersion(out httpVersion))
{
// Update the index, current char, state and jump directly
// to the next state
i += httpVersion.Length + 1;
ch = data[i];
state = StartLineState.NewLine;
goto case StartLineState.NewLine;
}
versionStart = i;
state = StartLineState.UnknownVersion;
goto case StartLineState.UnknownVersion;
case StartLineState.UnknownVersion:
if (ch == ByteCR)
{
var versionSpan = span.Slice(versionStart, i - versionStart);
if (versionSpan.Length == 0)
{
RejectRequestLine(start, end);
}
else
{
RejectRequest(RequestRejectionReason.UnrecognizedHTTPVersion, versionSpan.GetAsciiStringEscaped());
}
}
break;
case StartLineState.NewLine:
if (ch != ByteLF)
{
RejectRequestLine(start, end);
}
state = StartLineState.Complete;
break;
case StartLineState.Complete:
break;
default:
break;
}
}
}
if (state != StartLineState.Complete)
{
RejectRequestLine(start, end);
}
var pathBuffer = span.Slice(pathStart, pathEnd - pathStart);
var targetBuffer = span.Slice(pathStart, queryEnd - pathStart);
// URIs are always encoded/escaped to ASCII https://tools.ietf.org/html/rfc3986#page-11
// Multibyte Internationalized Resource Identifiers (IRIs) are first converted to utf8;
// then encoded/escaped to ASCII https://www.ietf.org/rfc/rfc3987.txt "Mapping of IRIs to URIs"
string requestUrlPath;
string rawTarget;
if (needDecode)
{
// Read raw target before mutating memory.
rawTarget = targetBuffer.GetAsciiString() ?? string.Empty;
// URI was encoded, unescape and then parse as utf8
var pathSpan = pathBuffer;
int pathLength = UrlEncoder.Decode(pathSpan, pathSpan);
requestUrlPath = new Utf8String(pathSpan.Slice(0, pathLength)).ToString();
}
else
{
// URI wasn't encoded, parse as ASCII
requestUrlPath = pathBuffer.GetAsciiString() ?? string.Empty;
if (queryString.Length == 0)
{
// No need to allocate an extra string if the path didn't need
// decoding and there's no query string following it.
rawTarget = requestUrlPath;
}
else
{
rawTarget = targetBuffer.GetAsciiString() ?? string.Empty;
}
}
var normalizedTarget = PathNormalizer.RemoveDotSegments(requestUrlPath);
consumed = end;
examined = end;
Method = method;
QueryString = queryString;
RawTarget = rawTarget;
HttpVersion = httpVersion;
if (RequestUrlStartsWithPathBase(normalizedTarget, out bool caseMatches))
{
PathBase = caseMatches ? _pathBase : normalizedTarget.Substring(0, _pathBase.Length);
Path = normalizedTarget.Substring(_pathBase.Length);
}
else if (rawTarget[0] == '/') // check rawTarget since normalizedTarget can be "" or "/" after dot segment removal
{
Path = normalizedTarget;
}
else
{
Path = string.Empty;
PathBase = string.Empty;
QueryString = string.Empty;
}
return true;
}
private void RejectRequestLine(ReadCursor start, ReadCursor end)
{
const int MaxRequestLineError = 32;
RejectRequest(RequestRejectionReason.InvalidRequestLine,
Log.IsEnabled(LogLevel.Information) ? start.GetAsciiStringEscaped(end, MaxRequestLineError) : string.Empty);
}
private static bool IsValidTokenChar(char c)
{
// Determines if a character is valid as a 'token' as defined in the
// HTTP spec: https://tools.ietf.org/html/rfc7230#section-3.2.6
return
(c >= '0' && c <= '9') ||
(c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') ||
c == '!' ||
c == '#' ||
c == '$' ||
c == '%' ||
c == '&' ||
c == '\'' ||
c == '*' ||
c == '+' ||
c == '-' ||
c == '.' ||
c == '^' ||
c == '_' ||
c == '`' ||
c == '|' ||
c == '~';
}
private bool RequestUrlStartsWithPathBase(string requestUrl, out bool caseMatches)
{
caseMatches = true;
if (string.IsNullOrEmpty(_pathBase))
{
return false;
}
if (requestUrl.Length < _pathBase.Length || (requestUrl.Length > _pathBase.Length && requestUrl[_pathBase.Length] != '/'))
{
return false;
}
for (var i = 0; i < _pathBase.Length; i++)
{
if (requestUrl[i] != _pathBase[i])
{
if (char.ToLowerInvariant(requestUrl[i]) == char.ToLowerInvariant(_pathBase[i]))
{
caseMatches = false;
}
else
{
return false;
}
}
}
return true;
}
public unsafe bool TakeMessageHeaders(ReadableBuffer buffer, FrameRequestHeaders requestHeaders, out ReadCursor consumed, out ReadCursor examined)
{
consumed = buffer.Start;
examined = buffer.End;
var bufferEnd = buffer.End;
var reader = new ReadableBufferReader(buffer);
// Make sure the buffer is limited
var overLength = false;
if (buffer.Length >= _remainingRequestHeadersBytesAllowed)
{
bufferEnd = buffer.Move(consumed, _remainingRequestHeadersBytesAllowed);
// If we sliced it means the current buffer bigger than what we're
// allowed to look at
overLength = true;
}
while (true)
{
var start = reader;
int ch1 = reader.Take();
var ch2 = reader.Take();
if (ch1 == -1)
{
return false;
}
if (ch1 == ByteCR)
{
// Check for final CRLF.
if (ch2 == -1)
{
return false;
}
else if (ch2 == ByteLF)
{
consumed = reader.Cursor;
examined = consumed;
ConnectionControl.CancelTimeout();
return true;
}
// Headers don't end in CRLF line.
RejectRequest(RequestRejectionReason.HeadersCorruptedInvalidHeaderSequence);
}
else if (ch1 == ByteSpace || ch1 == ByteTab)
{
RejectRequest(RequestRejectionReason.HeaderLineMustNotStartWithWhitespace);
}
// If we've parsed the max allowed numbers of headers and we're starting a new
// one, we've gone over the limit.
if (_requestHeadersParsed == ServerOptions.Limits.MaxRequestHeaderCount)
{
RejectRequest(RequestRejectionReason.TooManyHeaders);
}
// Reset the reader since we're not at the end of headers
reader = start;
if (ReadCursorOperations.Seek(consumed, bufferEnd, out var lineEnd, ByteLF) == -1)
{
// We didn't find a \n in the current buffer and we had to slice it so it's an issue
if (overLength)
{
RejectRequest(RequestRejectionReason.HeadersExceedMaxTotalSize);
}
else
{
return false;
}
}
const int stackAllocLimit = 512;
if (lineEnd != bufferEnd)
{
lineEnd = buffer.Move(lineEnd, 1);
}
var headerBuffer = buffer.Slice(consumed, lineEnd);
Span<byte> span;
if (headerBuffer.IsSingleSpan)
{
// No copies, directly use the one and only span
span = headerBuffer.First.Span;
}
else if (headerBuffer.Length < stackAllocLimit)
{
// Multiple buffers and < stackAllocLimit, copy into a stack buffer
byte* stackBuffer = stackalloc byte[headerBuffer.Length];
span = new Span<byte>(stackBuffer, headerBuffer.Length);
headerBuffer.CopyTo(span);
}
else
{
// We're not a single span here but we can use pooled arrays to avoid allocations in the rare case
span = new Span<byte>(new byte[headerBuffer.Length]);
headerBuffer.CopyTo(span);
}
var state = HeaderState.Name;
var nameStart = 0;
var nameEnd = -1;
var valueStart = -1;
var valueEnd = -1;
var nameHasWhitespace = false;
var previouslyWhitespace = false;
var headerLineLength = span.Length;
fixed (byte* data = &span.DangerousGetPinnableReference())
{
for (var i = 0; i < headerLineLength; i++)
{
var ch = data[i];
switch (state)
{
case HeaderState.Name:
if (ch == ByteColon)
{
if (nameHasWhitespace)
{
RejectRequest(RequestRejectionReason.WhitespaceIsNotAllowedInHeaderName);
}
state = HeaderState.Whitespace;
nameEnd = i;
}
if (ch == ByteSpace || ch == ByteTab)
{
nameHasWhitespace = true;
}
break;
case HeaderState.Whitespace:
{
var whitespace = ch == ByteTab || ch == ByteSpace || ch == ByteCR;
if (!whitespace)
{
// Mark the first non whitespace char as the start of the
// header value and change the state to expect the header value
valueStart = i;
state = HeaderState.ExpectValue;
}
// If we see a CR then jump to the next state directly
else if (ch == ByteCR)
{
state = HeaderState.ExpectValue;
goto case HeaderState.ExpectValue;
}
}
break;
case HeaderState.ExpectValue:
{
var whitespace = ch == ByteTab || ch == ByteSpace;
if (whitespace)
{
if (!previouslyWhitespace)
{
// If we see a whitespace char then maybe it's end of the
// header value
valueEnd = i;
}
}
else if (ch == ByteCR)
{
// If we see a CR and we haven't ever seen whitespace then
// this is the end of the header value
if (valueEnd == -1)
{
valueEnd = i;
}
// We never saw a non whitespace character before the CR
if (valueStart == -1)
{
valueStart = valueEnd;
}
state = HeaderState.ExpectNewLine;
}
else
{
// If we find a non whitespace char that isn't CR then reset the end index
valueEnd = -1;
}
previouslyWhitespace = whitespace;
}
break;
case HeaderState.ExpectNewLine:
if (ch != ByteLF)
{
RejectRequest(RequestRejectionReason.HeaderValueMustNotContainCR);
}
state = HeaderState.Complete;
break;
default:
break;
}
}
}
if (state == HeaderState.Name)
{
RejectRequest(RequestRejectionReason.NoColonCharacterFoundInHeaderLine);
}
if (state == HeaderState.ExpectValue || state == HeaderState.Whitespace)
{
RejectRequest(RequestRejectionReason.MissingCRInHeaderLine);
}
if (state != HeaderState.Complete)
{
return false;
}
// Skip the reader forward past the header line
reader.Skip(headerLineLength);
// Before accepting the header line, we need to see at least one character
// > so we can make sure there's no space or tab
var next = reader.Peek();
// TODO: We don't need to reject the line here, we can use the state machine
// to store the fact that we're reading a header value
if (next == -1)
{
// If we can't see the next char then reject the entire line
return false;
}
if (next == ByteSpace || next == ByteTab)
{
// From https://tools.ietf.org/html/rfc7230#section-3.2.4:
//
// Historically, HTTP header field values could be extended over
// multiple lines by preceding each extra line with at least one space
// or horizontal tab (obs-fold). This specification deprecates such
// line folding except within the message/http media type
// (Section 8.3.1). A sender MUST NOT generate a message that includes
// line folding (i.e., that has any field-value that contains a match to
// the obs-fold rule) unless the message is intended for packaging
// within the message/http media type.
//
// A server that receives an obs-fold in a request message that is not
// within a message/http container MUST either reject the message by
// sending a 400 (Bad Request), preferably with a representation
// explaining that obsolete line folding is unacceptable, or replace
// each received obs-fold with one or more SP octets prior to
// interpreting the field value or forwarding the message downstream.
RejectRequest(RequestRejectionReason.HeaderValueLineFoldingNotSupported);
}
var nameBuffer = span.Slice(nameStart, nameEnd - nameStart);
var valueBuffer = span.Slice(valueStart, valueEnd - valueStart);
var value = valueBuffer.GetAsciiString() ?? string.Empty;
// Update the frame state only after we know there's no header line continuation
_remainingRequestHeadersBytesAllowed -= headerLineLength;
_requestHeadersParsed++;
requestHeaders.Append(nameBuffer, value);
consumed = reader.Cursor;
}
}
public bool StatusCanHaveBody(int statusCode)
{
// List of status codes taken from Microsoft.Net.Http.Server.Response
return statusCode != StatusCodes.Status204NoContent &&
statusCode != StatusCodes.Status205ResetContent &&
statusCode != StatusCodes.Status304NotModified;
}
private void ThrowResponseAlreadyStartedException(string value)
{
throw new InvalidOperationException($"{value} cannot be set, response has already started.");
}
private void RejectNonBodyTransferEncodingResponse(bool appCompleted)
{
var ex = new InvalidOperationException($"Transfer-Encoding set on a {StatusCode} non-body request.");
if (!appCompleted)
{
// Back out of header creation and surface the exception in user code
_requestProcessingStatus = RequestProcessingStatus.RequestStarted;
throw ex;
}
else
{
ReportApplicationError(ex);
// 500 Internal Server Error
SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError);
}
}
private void SetErrorResponseHeaders(int statusCode)
{
Debug.Assert(!HasResponseStarted, $"{nameof(SetErrorResponseHeaders)} called after response had already started.");
StatusCode = statusCode;
ReasonPhrase = null;
if (FrameResponseHeaders == null)
{
InitializeHeaders();
}
var responseHeaders = FrameResponseHeaders;
responseHeaders.Reset();
var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues();
responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes);
responseHeaders.ContentLength = 0;
if (ServerOptions.AddServerHeader)
{
responseHeaders.SetRawServer(Constants.ServerName, _bytesServer);
}
}
public void HandleNonBodyResponseWrite()
{
// Writes to HEAD response are ignored and logged at the end of the request
if (Method != "HEAD")
{
// Throw Exception for 204, 205, 304 responses.
throw new InvalidOperationException($"Write to non-body {StatusCode} response.");
}
}
private void ThrowResponseAbortedException()
{
throw new ObjectDisposedException(
"The response has been aborted due to an unhandled application exception.",
_applicationException);
}
public void RejectRequest(RequestRejectionReason reason)
{
RejectRequest(BadHttpRequestException.GetException(reason));
}
public void RejectRequest(RequestRejectionReason reason, string value)
{
RejectRequest(BadHttpRequestException.GetException(reason, value));
}
private void RejectRequest(BadHttpRequestException ex)
{
Log.ConnectionBadRequest(ConnectionId, ex);
throw ex;
}
public void SetBadRequestState(RequestRejectionReason reason)
{
SetBadRequestState(BadHttpRequestException.GetException(reason));
}
public void SetBadRequestState(BadHttpRequestException ex)
{
if (!HasResponseStarted)
{
SetErrorResponseHeaders(ex.StatusCode);
}
_keepAlive = false;
_requestProcessingStopping = true;
_requestRejectedException = ex;
}
protected void ReportApplicationError(Exception ex)
{
if (_applicationException == null)
{
_applicationException = ex;
}
else if (_applicationException is AggregateException)
{
_applicationException = new AggregateException(_applicationException, ex).Flatten();
}
else
{
_applicationException = new AggregateException(_applicationException, ex);
}
Log.ApplicationError(ConnectionId, ex);
}
public enum RequestLineStatus
{
Empty,
Incomplete,
Done
}
private enum RequestProcessingStatus
{
RequestPending,
RequestStarted,
ResponseStarted
}
private enum StartLineState
{
KnownMethod,
UnknownMethod,
Path,
QueryString,
KnownVersion,
UnknownVersion,
NewLine,
Complete
}
private enum HeaderState
{
Name,
Whitespace,
ExpectValue,
ExpectNewLine,
Complete
}
}
}
| 1 | 11,691 | This is the `GET % HTTP/1.1` scenario right? | aspnet-KestrelHttpServer | .cs |
@@ -1,6 +1,7 @@
<?php namespace Backend\FormWidgets;
use App;
+use Backend\Facades\BackendAuth;
use File;
use Event;
use Lang;
 | 1 | <?php namespace Backend\FormWidgets;
use App;
use File;
use Event;
use Lang;
use Request;
use Backend\Classes\FormWidgetBase;
use Backend\Models\EditorSetting;
/**
* Rich Editor
* Renders a rich content editor field.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class RichEditor extends FormWidgetBase
{
//
// Configurable properties
//
/**
* @var boolean Determines whether content has HEAD and HTML tags.
*/
public $fullPage = false;
/**
* @var string|null Defines the toolbar buttons to display in the editor.
*/
public $toolbarButtons = null;
/**
* @var boolean If true, the editor is set to read-only mode
*/
public $readOnly = false;
//
// Object properties
//
/**
* @inheritDoc
*/
protected $defaultAlias = 'richeditor';
/**
* @inheritDoc
*/
public function init()
{
if ($this->formField->disabled) {
$this->readOnly = true;
}
$this->fillFromConfig([
'fullPage',
'readOnly',
'toolbarButtons',
]);
}
/**
* @inheritDoc
*/
public function render()
{
$this->prepareVars();
return $this->makePartial('richeditor');
}
/**
* Prepares the list data
*/
public function prepareVars()
{
$this->vars['field'] = $this->formField;
$this->vars['editorLang'] = $this->getValidEditorLang();
$this->vars['fullPage'] = $this->fullPage;
$this->vars['stretch'] = $this->formField->stretch;
$this->vars['size'] = $this->formField->size;
$this->vars['readOnly'] = $this->readOnly;
$this->vars['name'] = $this->getFieldName();
$this->vars['value'] = $this->getLoadValue();
$this->vars['toolbarButtons'] = $this->evalToolbarButtons();
$this->vars['globalToolbarButtons'] = EditorSetting::getConfigured('html_toolbar_buttons');
$this->vars['allowEmptyTags'] = EditorSetting::getConfigured('html_allow_empty_tags');
$this->vars['allowTags'] = EditorSetting::getConfigured('html_allow_tags');
$this->vars['noWrapTags'] = EditorSetting::getConfigured('html_no_wrap_tags');
$this->vars['removeTags'] = EditorSetting::getConfigured('html_remove_tags');
$this->vars['imageStyles'] = EditorSetting::getConfiguredStyles('html_style_image');
$this->vars['linkStyles'] = EditorSetting::getConfiguredStyles('html_style_link');
$this->vars['paragraphStyles'] = EditorSetting::getConfiguredStyles('html_style_paragraph');
$this->vars['tableStyles'] = EditorSetting::getConfiguredStyles('html_style_table');
$this->vars['tableCellStyles'] = EditorSetting::getConfiguredStyles('html_style_table_cell');
}
/**
* Determine the toolbar buttons to use based on config.
* @return string
*/
protected function evalToolbarButtons()
{
$buttons = $this->toolbarButtons;
if (is_string($buttons)) {
$buttons = array_map(function ($button) {
return strlen($button) ? $button : '|';
}, explode('|', $buttons));
}
return $buttons;
}
public function onLoadPageLinksForm()
{
$this->vars['links'] = $this->getPageLinksArray();
return $this->makePartial('page_links_form');
}
/**
* @inheritDoc
*/
protected function loadAssets()
{
$this->addCss('css/richeditor.css', 'core');
$this->addJs('js/build-min.js', 'core');
$this->addJs('/modules/backend/formwidgets/codeeditor/assets/js/build-min.js', 'core');
if ($lang = $this->getValidEditorLang()) {
$this->addJs('vendor/froala/js/languages/'.$lang.'.js', 'core');
}
}
/**
* Returns a valid language code for Redactor.
* @return string|mixed
*/
protected function getValidEditorLang()
{
$locale = App::getLocale();
// English is baked in
if ($locale == 'en') {
return null;
}
$locale = str_replace('-', '_', strtolower($locale));
$path = base_path('modules/backend/formwidgets/richeditor/assets/vendor/froala/js/languages/'.$locale.'.js');
return File::exists($path) ? $locale : false;
}
/**
* Returns a list of registered page link types.
* This is reserved functionality for separating the links by type.
* @return array Returns an array of registered page link types
*/
protected function getPageLinkTypes()
{
$result = [];
$apiResult = Event::fire('backend.richeditor.listTypes');
if (is_array($apiResult)) {
foreach ($apiResult as $typeList) {
if (!is_array($typeList)) {
continue;
}
foreach ($typeList as $typeCode => $typeName) {
$result[$typeCode] = $typeName;
}
}
}
return $result;
}
protected function getPageLinks($type)
{
$result = [];
$apiResult = Event::fire('backend.richeditor.getTypeInfo', [$type]);
if (is_array($apiResult)) {
foreach ($apiResult as $typeInfo) {
if (!is_array($typeInfo)) {
continue;
}
foreach ($typeInfo as $name => $value) {
$result[$name] = $value;
}
}
}
return $result;
}
/**
* Returns a single collection of available page links.
* This implementation has room to place links under
* different groups based on the link type.
* @return array
*/
protected function getPageLinksArray()
{
$links = [];
$types = $this->getPageLinkTypes();
$links[] = ['name' => Lang::get('backend::lang.pagelist.select_page'), 'url' => false];
$iterator = function ($links, $level = 0) use (&$iterator) {
$result = [];
foreach ($links as $linkUrl => $link) {
/*
* Remove scheme and host from URL
*/
$baseUrl = Request::getSchemeAndHttpHost();
if (strpos($linkUrl, $baseUrl) === 0) {
$linkUrl = substr($linkUrl, strlen($baseUrl));
}
$linkName = str_repeat(' ', $level * 4);
$linkName .= is_array($link) ? array_get($link, 'title', '') : $link;
$result[] = ['name' => $linkName, 'url' => $linkUrl];
if (is_array($link)) {
$result = array_merge(
$result,
$iterator(array_get($link, 'links', []), $level + 1)
);
}
}
return $result;
};
foreach ($types as $typeCode => $typeName) {
$links = array_merge($links, $iterator($this->getPageLinks($typeCode)));
}
return $links;
}
}
| 1 | 12,669 | No need to use the fully qualified path the BackendAuth facade, just `use BackendAuth` is fine. | octobercms-october | php |
@@ -357,6 +357,11 @@ bool nano::send_block::valid_predecessor (nano::block const & block_a) const
return result;
}
+nano::epoch nano::send_block::epoch () const
+{
+ return nano::epoch::epoch_0;
+}
+
nano::block_type nano::send_block::type () const
{
return nano::block_type::send;
 | 1 | #include <nano/crypto_lib/random_pool.hpp>
#include <nano/lib/blocks.hpp>
#include <nano/lib/memory.hpp>
#include <nano/lib/numbers.hpp>
#include <nano/lib/utility.hpp>
#include <boost/endian/conversion.hpp>
#include <boost/pool/pool_alloc.hpp>
/** Compare blocks, first by type, then content. This is an optimization over dynamic_cast, which is very slow on some platforms. */
namespace
{
template <typename T>
bool blocks_equal (T const & first, nano::block const & second)
{
static_assert (std::is_base_of<nano::block, T>::value, "Input parameter is not a block type");
return (first.type () == second.type ()) && (static_cast<T const &> (second)) == first;
}
template <typename block>
std::shared_ptr<block> deserialize_block (nano::stream & stream_a)
{
auto error (false);
auto result = nano::make_shared<block> (error, stream_a);
if (error)
{
result = nullptr;
}
return result;
}
}
void nano::block_memory_pool_purge ()
{
nano::purge_singleton_pool_memory<nano::open_block> ();
nano::purge_singleton_pool_memory<nano::state_block> ();
nano::purge_singleton_pool_memory<nano::send_block> ();
nano::purge_singleton_pool_memory<nano::change_block> ();
}
std::string nano::block::to_json () const
{
std::string result;
serialize_json (result);
return result;
}
size_t nano::block::size (nano::block_type type_a)
{
size_t result (0);
switch (type_a)
{
case nano::block_type::invalid:
case nano::block_type::not_a_block:
assert (false);
break;
case nano::block_type::send:
result = nano::send_block::size;
break;
case nano::block_type::receive:
result = nano::receive_block::size;
break;
case nano::block_type::change:
result = nano::change_block::size;
break;
case nano::block_type::open:
result = nano::open_block::size;
break;
case nano::block_type::state:
result = nano::state_block::size;
break;
}
return result;
}
nano::block_hash nano::block::hash () const
{
nano::uint256_union result;
blake2b_state hash_l;
auto status (blake2b_init (&hash_l, sizeof (result.bytes)));
assert (status == 0);
hash (hash_l);
status = blake2b_final (&hash_l, result.bytes.data (), sizeof (result.bytes));
assert (status == 0);
return result;
}
nano::block_hash nano::block::full_hash () const
{
nano::block_hash result;
blake2b_state state;
blake2b_init (&state, sizeof (result.bytes));
blake2b_update (&state, hash ().bytes.data (), sizeof (hash ()));
auto signature (block_signature ());
blake2b_update (&state, signature.bytes.data (), sizeof (signature));
auto work (block_work ());
blake2b_update (&state, &work, sizeof (work));
blake2b_final (&state, result.bytes.data (), sizeof (result.bytes));
return result;
}
nano::account nano::block::representative () const
{
return 0;
}
nano::block_hash nano::block::source () const
{
return 0;
}
nano::block_hash nano::block::link () const
{
return 0;
}
nano::account nano::block::account () const
{
return 0;
}
nano::qualified_root nano::block::qualified_root () const
{
return nano::qualified_root (previous (), root ());
}
void nano::send_block::visit (nano::block_visitor & visitor_a) const
{
visitor_a.send_block (*this);
}
void nano::send_block::hash (blake2b_state & hash_a) const
{
hashables.hash (hash_a);
}
uint64_t nano::send_block::block_work () const
{
return work;
}
void nano::send_block::block_work_set (uint64_t work_a)
{
work = work_a;
}
nano::send_hashables::send_hashables (nano::block_hash const & previous_a, nano::account const & destination_a, nano::amount const & balance_a) :
previous (previous_a),
destination (destination_a),
balance (balance_a)
{
}
nano::send_hashables::send_hashables (bool & error_a, nano::stream & stream_a)
{
try
{
nano::read (stream_a, previous.bytes);
nano::read (stream_a, destination.bytes);
nano::read (stream_a, balance.bytes);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
nano::send_hashables::send_hashables (bool & error_a, boost::property_tree::ptree const & tree_a)
{
try
{
auto previous_l (tree_a.get<std::string> ("previous"));
auto destination_l (tree_a.get<std::string> ("destination"));
auto balance_l (tree_a.get<std::string> ("balance"));
error_a = previous.decode_hex (previous_l);
if (!error_a)
{
error_a = destination.decode_account (destination_l);
if (!error_a)
{
error_a = balance.decode_hex (balance_l);
}
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
void nano::send_hashables::hash (blake2b_state & hash_a) const
{
auto status (blake2b_update (&hash_a, previous.bytes.data (), sizeof (previous.bytes)));
assert (status == 0);
status = blake2b_update (&hash_a, destination.bytes.data (), sizeof (destination.bytes));
assert (status == 0);
status = blake2b_update (&hash_a, balance.bytes.data (), sizeof (balance.bytes));
assert (status == 0);
}
void nano::send_block::serialize (nano::stream & stream_a) const
{
write (stream_a, hashables.previous.bytes);
write (stream_a, hashables.destination.bytes);
write (stream_a, hashables.balance.bytes);
write (stream_a, signature.bytes);
write (stream_a, work);
}
bool nano::send_block::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
read (stream_a, hashables.previous.bytes);
read (stream_a, hashables.destination.bytes);
read (stream_a, hashables.balance.bytes);
read (stream_a, signature.bytes);
read (stream_a, work);
}
catch (std::exception const &)
{
error = true;
}
return error;
}
void nano::send_block::serialize_json (std::string & string_a, bool single_line) const
{
boost::property_tree::ptree tree;
serialize_json (tree);
std::stringstream ostream;
boost::property_tree::write_json (ostream, tree, !single_line);
string_a = ostream.str ();
}
void nano::send_block::serialize_json (boost::property_tree::ptree & tree) const
{
tree.put ("type", "send");
std::string previous;
hashables.previous.encode_hex (previous);
tree.put ("previous", previous);
tree.put ("destination", hashables.destination.to_account ());
std::string balance;
hashables.balance.encode_hex (balance);
tree.put ("balance", balance);
std::string signature_l;
signature.encode_hex (signature_l);
tree.put ("work", nano::to_string_hex (work));
tree.put ("signature", signature_l);
}
bool nano::send_block::deserialize_json (boost::property_tree::ptree const & tree_a)
{
auto error (false);
try
{
assert (tree_a.get<std::string> ("type") == "send");
auto previous_l (tree_a.get<std::string> ("previous"));
auto destination_l (tree_a.get<std::string> ("destination"));
auto balance_l (tree_a.get<std::string> ("balance"));
auto work_l (tree_a.get<std::string> ("work"));
auto signature_l (tree_a.get<std::string> ("signature"));
error = hashables.previous.decode_hex (previous_l);
if (!error)
{
error = hashables.destination.decode_account (destination_l);
if (!error)
{
error = hashables.balance.decode_hex (balance_l);
if (!error)
{
error = nano::from_string_hex (work_l, work);
if (!error)
{
error = signature.decode_hex (signature_l);
}
}
}
}
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
nano::send_block::send_block (nano::block_hash const & previous_a, nano::account const & destination_a, nano::amount const & balance_a, nano::raw_key const & prv_a, nano::public_key const & pub_a, uint64_t work_a) :
hashables (previous_a, destination_a, balance_a),
signature (nano::sign_message (prv_a, pub_a, hash ())),
work (work_a)
{
}
nano::send_block::send_block (bool & error_a, nano::stream & stream_a) :
hashables (error_a, stream_a)
{
if (!error_a)
{
try
{
nano::read (stream_a, signature.bytes);
nano::read (stream_a, work);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
nano::send_block::send_block (bool & error_a, boost::property_tree::ptree const & tree_a) :
hashables (error_a, tree_a)
{
if (!error_a)
{
try
{
auto signature_l (tree_a.get<std::string> ("signature"));
auto work_l (tree_a.get<std::string> ("work"));
error_a = signature.decode_hex (signature_l);
if (!error_a)
{
error_a = nano::from_string_hex (work_l, work);
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
bool nano::send_block::operator== (nano::block const & other_a) const
{
return blocks_equal (*this, other_a);
}
bool nano::send_block::valid_predecessor (nano::block const & block_a) const
{
bool result;
switch (block_a.type ())
{
case nano::block_type::send:
case nano::block_type::receive:
case nano::block_type::open:
case nano::block_type::change:
result = true;
break;
default:
result = false;
break;
}
return result;
}
nano::block_type nano::send_block::type () const
{
return nano::block_type::send;
}
bool nano::send_block::operator== (nano::send_block const & other_a) const
{
auto result (hashables.destination == other_a.hashables.destination && hashables.previous == other_a.hashables.previous && hashables.balance == other_a.hashables.balance && work == other_a.work && signature == other_a.signature);
return result;
}
nano::block_hash nano::send_block::previous () const
{
return hashables.previous;
}
nano::block_hash nano::send_block::root () const
{
return hashables.previous;
}
nano::signature nano::send_block::block_signature () const
{
return signature;
}
void nano::send_block::signature_set (nano::uint512_union const & signature_a)
{
signature = signature_a;
}
nano::open_hashables::open_hashables (nano::block_hash const & source_a, nano::account const & representative_a, nano::account const & account_a) :
source (source_a),
representative (representative_a),
account (account_a)
{
}
nano::open_hashables::open_hashables (bool & error_a, nano::stream & stream_a)
{
try
{
nano::read (stream_a, source.bytes);
nano::read (stream_a, representative.bytes);
nano::read (stream_a, account.bytes);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
nano::open_hashables::open_hashables (bool & error_a, boost::property_tree::ptree const & tree_a)
{
try
{
auto source_l (tree_a.get<std::string> ("source"));
auto representative_l (tree_a.get<std::string> ("representative"));
auto account_l (tree_a.get<std::string> ("account"));
error_a = source.decode_hex (source_l);
if (!error_a)
{
error_a = representative.decode_account (representative_l);
if (!error_a)
{
error_a = account.decode_account (account_l);
}
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
void nano::open_hashables::hash (blake2b_state & hash_a) const
{
blake2b_update (&hash_a, source.bytes.data (), sizeof (source.bytes));
blake2b_update (&hash_a, representative.bytes.data (), sizeof (representative.bytes));
blake2b_update (&hash_a, account.bytes.data (), sizeof (account.bytes));
}
nano::open_block::open_block (nano::block_hash const & source_a, nano::account const & representative_a, nano::account const & account_a, nano::raw_key const & prv_a, nano::public_key const & pub_a, uint64_t work_a) :
hashables (source_a, representative_a, account_a),
signature (nano::sign_message (prv_a, pub_a, hash ())),
work (work_a)
{
assert (!representative_a.is_zero ());
}
nano::open_block::open_block (nano::block_hash const & source_a, nano::account const & representative_a, nano::account const & account_a, std::nullptr_t) :
hashables (source_a, representative_a, account_a),
work (0)
{
signature.clear ();
}
nano::open_block::open_block (bool & error_a, nano::stream & stream_a) :
hashables (error_a, stream_a)
{
if (!error_a)
{
try
{
nano::read (stream_a, signature);
nano::read (stream_a, work);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
nano::open_block::open_block (bool & error_a, boost::property_tree::ptree const & tree_a) :
hashables (error_a, tree_a)
{
if (!error_a)
{
try
{
auto work_l (tree_a.get<std::string> ("work"));
auto signature_l (tree_a.get<std::string> ("signature"));
error_a = nano::from_string_hex (work_l, work);
if (!error_a)
{
error_a = signature.decode_hex (signature_l);
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
void nano::open_block::hash (blake2b_state & hash_a) const
{
hashables.hash (hash_a);
}
uint64_t nano::open_block::block_work () const
{
return work;
}
void nano::open_block::block_work_set (uint64_t work_a)
{
work = work_a;
}
nano::block_hash nano::open_block::previous () const
{
nano::block_hash result (0);
return result;
}
nano::account nano::open_block::account () const
{
return hashables.account;
}
void nano::open_block::serialize (nano::stream & stream_a) const
{
write (stream_a, hashables.source);
write (stream_a, hashables.representative);
write (stream_a, hashables.account);
write (stream_a, signature);
write (stream_a, work);
}
bool nano::open_block::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
read (stream_a, hashables.source);
read (stream_a, hashables.representative);
read (stream_a, hashables.account);
read (stream_a, signature);
read (stream_a, work);
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
void nano::open_block::serialize_json (std::string & string_a, bool single_line) const
{
boost::property_tree::ptree tree;
serialize_json (tree);
std::stringstream ostream;
boost::property_tree::write_json (ostream, tree, !single_line);
string_a = ostream.str ();
}
void nano::open_block::serialize_json (boost::property_tree::ptree & tree) const
{
tree.put ("type", "open");
tree.put ("source", hashables.source.to_string ());
tree.put ("representative", representative ().to_account ());
tree.put ("account", hashables.account.to_account ());
std::string signature_l;
signature.encode_hex (signature_l);
tree.put ("work", nano::to_string_hex (work));
tree.put ("signature", signature_l);
}
bool nano::open_block::deserialize_json (boost::property_tree::ptree const & tree_a)
{
auto error (false);
try
{
assert (tree_a.get<std::string> ("type") == "open");
auto source_l (tree_a.get<std::string> ("source"));
auto representative_l (tree_a.get<std::string> ("representative"));
auto account_l (tree_a.get<std::string> ("account"));
auto work_l (tree_a.get<std::string> ("work"));
auto signature_l (tree_a.get<std::string> ("signature"));
error = hashables.source.decode_hex (source_l);
if (!error)
{
error = hashables.representative.decode_hex (representative_l);
if (!error)
{
error = hashables.account.decode_hex (account_l);
if (!error)
{
error = nano::from_string_hex (work_l, work);
if (!error)
{
error = signature.decode_hex (signature_l);
}
}
}
}
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
void nano::open_block::visit (nano::block_visitor & visitor_a) const
{
visitor_a.open_block (*this);
}
nano::block_type nano::open_block::type () const
{
return nano::block_type::open;
}
bool nano::open_block::operator== (nano::block const & other_a) const
{
return blocks_equal (*this, other_a);
}
bool nano::open_block::operator== (nano::open_block const & other_a) const
{
return hashables.source == other_a.hashables.source && hashables.representative == other_a.hashables.representative && hashables.account == other_a.hashables.account && work == other_a.work && signature == other_a.signature;
}
bool nano::open_block::valid_predecessor (nano::block const & block_a) const
{
return false;
}
nano::block_hash nano::open_block::source () const
{
return hashables.source;
}
nano::block_hash nano::open_block::root () const
{
return hashables.account;
}
nano::account nano::open_block::representative () const
{
return hashables.representative;
}
nano::signature nano::open_block::block_signature () const
{
return signature;
}
void nano::open_block::signature_set (nano::uint512_union const & signature_a)
{
signature = signature_a;
}
nano::change_hashables::change_hashables (nano::block_hash const & previous_a, nano::account const & representative_a) :
previous (previous_a),
representative (representative_a)
{
}
nano::change_hashables::change_hashables (bool & error_a, nano::stream & stream_a)
{
try
{
nano::read (stream_a, previous);
nano::read (stream_a, representative);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
nano::change_hashables::change_hashables (bool & error_a, boost::property_tree::ptree const & tree_a)
{
try
{
auto previous_l (tree_a.get<std::string> ("previous"));
auto representative_l (tree_a.get<std::string> ("representative"));
error_a = previous.decode_hex (previous_l);
if (!error_a)
{
error_a = representative.decode_account (representative_l);
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
void nano::change_hashables::hash (blake2b_state & hash_a) const
{
blake2b_update (&hash_a, previous.bytes.data (), sizeof (previous.bytes));
blake2b_update (&hash_a, representative.bytes.data (), sizeof (representative.bytes));
}
nano::change_block::change_block (nano::block_hash const & previous_a, nano::account const & representative_a, nano::raw_key const & prv_a, nano::public_key const & pub_a, uint64_t work_a) :
hashables (previous_a, representative_a),
signature (nano::sign_message (prv_a, pub_a, hash ())),
work (work_a)
{
}
nano::change_block::change_block (bool & error_a, nano::stream & stream_a) :
hashables (error_a, stream_a)
{
if (!error_a)
{
try
{
nano::read (stream_a, signature);
nano::read (stream_a, work);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
nano::change_block::change_block (bool & error_a, boost::property_tree::ptree const & tree_a) :
hashables (error_a, tree_a)
{
if (!error_a)
{
try
{
auto work_l (tree_a.get<std::string> ("work"));
auto signature_l (tree_a.get<std::string> ("signature"));
error_a = nano::from_string_hex (work_l, work);
if (!error_a)
{
error_a = signature.decode_hex (signature_l);
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
void nano::change_block::hash (blake2b_state & hash_a) const
{
hashables.hash (hash_a);
}
uint64_t nano::change_block::block_work () const
{
return work;
}
void nano::change_block::block_work_set (uint64_t work_a)
{
work = work_a;
}
nano::block_hash nano::change_block::previous () const
{
return hashables.previous;
}
void nano::change_block::serialize (nano::stream & stream_a) const
{
write (stream_a, hashables.previous);
write (stream_a, hashables.representative);
write (stream_a, signature);
write (stream_a, work);
}
bool nano::change_block::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
read (stream_a, hashables.previous);
read (stream_a, hashables.representative);
read (stream_a, signature);
read (stream_a, work);
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
void nano::change_block::serialize_json (std::string & string_a, bool single_line) const
{
boost::property_tree::ptree tree;
serialize_json (tree);
std::stringstream ostream;
boost::property_tree::write_json (ostream, tree, !single_line);
string_a = ostream.str ();
}
void nano::change_block::serialize_json (boost::property_tree::ptree & tree) const
{
tree.put ("type", "change");
tree.put ("previous", hashables.previous.to_string ());
tree.put ("representative", representative ().to_account ());
tree.put ("work", nano::to_string_hex (work));
std::string signature_l;
signature.encode_hex (signature_l);
tree.put ("signature", signature_l);
}
bool nano::change_block::deserialize_json (boost::property_tree::ptree const & tree_a)
{
auto error (false);
try
{
assert (tree_a.get<std::string> ("type") == "change");
auto previous_l (tree_a.get<std::string> ("previous"));
auto representative_l (tree_a.get<std::string> ("representative"));
auto work_l (tree_a.get<std::string> ("work"));
auto signature_l (tree_a.get<std::string> ("signature"));
error = hashables.previous.decode_hex (previous_l);
if (!error)
{
error = hashables.representative.decode_hex (representative_l);
if (!error)
{
error = nano::from_string_hex (work_l, work);
if (!error)
{
error = signature.decode_hex (signature_l);
}
}
}
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
void nano::change_block::visit (nano::block_visitor & visitor_a) const
{
visitor_a.change_block (*this);
}
nano::block_type nano::change_block::type () const
{
return nano::block_type::change;
}
bool nano::change_block::operator== (nano::block const & other_a) const
{
return blocks_equal (*this, other_a);
}
bool nano::change_block::operator== (nano::change_block const & other_a) const
{
return hashables.previous == other_a.hashables.previous && hashables.representative == other_a.hashables.representative && work == other_a.work && signature == other_a.signature;
}
bool nano::change_block::valid_predecessor (nano::block const & block_a) const
{
bool result;
switch (block_a.type ())
{
case nano::block_type::send:
case nano::block_type::receive:
case nano::block_type::open:
case nano::block_type::change:
result = true;
break;
default:
result = false;
break;
}
return result;
}
nano::block_hash nano::change_block::root () const
{
return hashables.previous;
}
nano::account nano::change_block::representative () const
{
return hashables.representative;
}
nano::signature nano::change_block::block_signature () const
{
return signature;
}
void nano::change_block::signature_set (nano::uint512_union const & signature_a)
{
signature = signature_a;
}
nano::state_hashables::state_hashables (nano::account const & account_a, nano::block_hash const & previous_a, nano::account const & representative_a, nano::amount const & balance_a, nano::uint256_union const & link_a) :
account (account_a),
previous (previous_a),
representative (representative_a),
balance (balance_a),
link (link_a)
{
}
nano::state_hashables::state_hashables (bool & error_a, nano::stream & stream_a)
{
try
{
nano::read (stream_a, account);
nano::read (stream_a, previous);
nano::read (stream_a, representative);
nano::read (stream_a, balance);
nano::read (stream_a, link);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
nano::state_hashables::state_hashables (bool & error_a, boost::property_tree::ptree const & tree_a)
{
try
{
auto account_l (tree_a.get<std::string> ("account"));
auto previous_l (tree_a.get<std::string> ("previous"));
auto representative_l (tree_a.get<std::string> ("representative"));
auto balance_l (tree_a.get<std::string> ("balance"));
auto link_l (tree_a.get<std::string> ("link"));
error_a = account.decode_account (account_l);
if (!error_a)
{
error_a = previous.decode_hex (previous_l);
if (!error_a)
{
error_a = representative.decode_account (representative_l);
if (!error_a)
{
error_a = balance.decode_dec (balance_l);
if (!error_a)
{
error_a = link.decode_account (link_l) && link.decode_hex (link_l);
}
}
}
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
void nano::state_hashables::hash (blake2b_state & hash_a) const
{
blake2b_update (&hash_a, account.bytes.data (), sizeof (account.bytes));
blake2b_update (&hash_a, previous.bytes.data (), sizeof (previous.bytes));
blake2b_update (&hash_a, representative.bytes.data (), sizeof (representative.bytes));
blake2b_update (&hash_a, balance.bytes.data (), sizeof (balance.bytes));
blake2b_update (&hash_a, link.bytes.data (), sizeof (link.bytes));
}
nano::state_block::state_block (nano::account const & account_a, nano::block_hash const & previous_a, nano::account const & representative_a, nano::amount const & balance_a, nano::uint256_union const & link_a, nano::raw_key const & prv_a, nano::public_key const & pub_a, uint64_t work_a) :
hashables (account_a, previous_a, representative_a, balance_a, link_a),
signature (nano::sign_message (prv_a, pub_a, hash ())),
work (work_a)
{
}
nano::state_block::state_block (bool & error_a, nano::stream & stream_a) :
hashables (error_a, stream_a)
{
if (!error_a)
{
try
{
nano::read (stream_a, signature);
nano::read (stream_a, work);
boost::endian::big_to_native_inplace (work);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
nano::state_block::state_block (bool & error_a, boost::property_tree::ptree const & tree_a) :
hashables (error_a, tree_a)
{
if (!error_a)
{
try
{
auto type_l (tree_a.get<std::string> ("type"));
auto signature_l (tree_a.get<std::string> ("signature"));
auto work_l (tree_a.get<std::string> ("work"));
error_a = type_l != "state";
if (!error_a)
{
error_a = nano::from_string_hex (work_l, work);
if (!error_a)
{
error_a = signature.decode_hex (signature_l);
}
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
void nano::state_block::hash (blake2b_state & hash_a) const
{
nano::uint256_union preamble (static_cast<uint64_t> (nano::block_type::state));
blake2b_update (&hash_a, preamble.bytes.data (), preamble.bytes.size ());
hashables.hash (hash_a);
}
uint64_t nano::state_block::block_work () const
{
return work;
}
void nano::state_block::block_work_set (uint64_t work_a)
{
work = work_a;
}
nano::block_hash nano::state_block::previous () const
{
return hashables.previous;
}
nano::account nano::state_block::account () const
{
return hashables.account;
}
void nano::state_block::serialize (nano::stream & stream_a) const
{
write (stream_a, hashables.account);
write (stream_a, hashables.previous);
write (stream_a, hashables.representative);
write (stream_a, hashables.balance);
write (stream_a, hashables.link);
write (stream_a, signature);
write (stream_a, boost::endian::native_to_big (work));
}
bool nano::state_block::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
read (stream_a, hashables.account);
read (stream_a, hashables.previous);
read (stream_a, hashables.representative);
read (stream_a, hashables.balance);
read (stream_a, hashables.link);
read (stream_a, signature);
read (stream_a, work);
boost::endian::big_to_native_inplace (work);
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
void nano::state_block::serialize_json (std::string & string_a, bool single_line) const
{
boost::property_tree::ptree tree;
serialize_json (tree);
std::stringstream ostream;
boost::property_tree::write_json (ostream, tree, !single_line);
string_a = ostream.str ();
}
void nano::state_block::serialize_json (boost::property_tree::ptree & tree) const
{
tree.put ("type", "state");
tree.put ("account", hashables.account.to_account ());
tree.put ("previous", hashables.previous.to_string ());
tree.put ("representative", representative ().to_account ());
tree.put ("balance", hashables.balance.to_string_dec ());
tree.put ("link", hashables.link.to_string ());
tree.put ("link_as_account", hashables.link.to_account ());
std::string signature_l;
signature.encode_hex (signature_l);
tree.put ("signature", signature_l);
tree.put ("work", nano::to_string_hex (work));
}
bool nano::state_block::deserialize_json (boost::property_tree::ptree const & tree_a)
{
auto error (false);
try
{
assert (tree_a.get<std::string> ("type") == "state");
auto account_l (tree_a.get<std::string> ("account"));
auto previous_l (tree_a.get<std::string> ("previous"));
auto representative_l (tree_a.get<std::string> ("representative"));
auto balance_l (tree_a.get<std::string> ("balance"));
auto link_l (tree_a.get<std::string> ("link"));
auto work_l (tree_a.get<std::string> ("work"));
auto signature_l (tree_a.get<std::string> ("signature"));
error = hashables.account.decode_account (account_l);
if (!error)
{
error = hashables.previous.decode_hex (previous_l);
if (!error)
{
error = hashables.representative.decode_account (representative_l);
if (!error)
{
error = hashables.balance.decode_dec (balance_l);
if (!error)
{
error = hashables.link.decode_account (link_l) && hashables.link.decode_hex (link_l);
if (!error)
{
error = nano::from_string_hex (work_l, work);
if (!error)
{
error = signature.decode_hex (signature_l);
}
}
}
}
}
}
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
void nano::state_block::visit (nano::block_visitor & visitor_a) const
{
visitor_a.state_block (*this);
}
nano::block_type nano::state_block::type () const
{
return nano::block_type::state;
}
bool nano::state_block::operator== (nano::block const & other_a) const
{
return blocks_equal (*this, other_a);
}
bool nano::state_block::operator== (nano::state_block const & other_a) const
{
return hashables.account == other_a.hashables.account && hashables.previous == other_a.hashables.previous && hashables.representative == other_a.hashables.representative && hashables.balance == other_a.hashables.balance && hashables.link == other_a.hashables.link && signature == other_a.signature && work == other_a.work;
}
bool nano::state_block::valid_predecessor (nano::block const & block_a) const
{
return true;
}
nano::block_hash nano::state_block::root () const
{
return !hashables.previous.is_zero () ? hashables.previous : hashables.account;
}
nano::block_hash nano::state_block::link () const
{
return hashables.link;
}
nano::account nano::state_block::representative () const
{
return hashables.representative;
}
nano::signature nano::state_block::block_signature () const
{
return signature;
}
void nano::state_block::signature_set (nano::uint512_union const & signature_a)
{
signature = signature_a;
}
std::shared_ptr<nano::block> nano::deserialize_block_json (boost::property_tree::ptree const & tree_a, nano::block_uniquer * uniquer_a)
{
std::shared_ptr<nano::block> result;
try
{
auto type (tree_a.get<std::string> ("type"));
if (type == "receive")
{
bool error (false);
std::unique_ptr<nano::receive_block> obj (new nano::receive_block (error, tree_a));
if (!error)
{
result = std::move (obj);
}
}
else if (type == "send")
{
bool error (false);
std::unique_ptr<nano::send_block> obj (new nano::send_block (error, tree_a));
if (!error)
{
result = std::move (obj);
}
}
else if (type == "open")
{
bool error (false);
std::unique_ptr<nano::open_block> obj (new nano::open_block (error, tree_a));
if (!error)
{
result = std::move (obj);
}
}
else if (type == "change")
{
bool error (false);
std::unique_ptr<nano::change_block> obj (new nano::change_block (error, tree_a));
if (!error)
{
result = std::move (obj);
}
}
else if (type == "state")
{
bool error (false);
std::unique_ptr<nano::state_block> obj (new nano::state_block (error, tree_a));
if (!error)
{
result = std::move (obj);
}
}
}
catch (std::runtime_error const &)
{
}
if (uniquer_a != nullptr)
{
result = uniquer_a->unique (result);
}
return result;
}
std::shared_ptr<nano::block> nano::deserialize_block (nano::stream & stream_a)
{
nano::block_type type;
auto error (try_read (stream_a, type));
std::shared_ptr<nano::block> result;
if (!error)
{
result = nano::deserialize_block (stream_a, type);
}
return result;
}
std::shared_ptr<nano::block> nano::deserialize_block (nano::stream & stream_a, nano::block_type type_a, nano::block_uniquer * uniquer_a)
{
std::shared_ptr<nano::block> result;
switch (type_a)
{
case nano::block_type::receive:
{
result = ::deserialize_block<nano::receive_block> (stream_a);
break;
}
case nano::block_type::send:
{
result = ::deserialize_block<nano::send_block> (stream_a);
break;
}
case nano::block_type::open:
{
result = ::deserialize_block<nano::open_block> (stream_a);
break;
}
case nano::block_type::change:
{
result = ::deserialize_block<nano::change_block> (stream_a);
break;
}
case nano::block_type::state:
{
result = ::deserialize_block<nano::state_block> (stream_a);
break;
}
default:
assert (false);
break;
}
if (uniquer_a != nullptr)
{
result = uniquer_a->unique (result);
}
return result;
}
void nano::receive_block::visit (nano::block_visitor & visitor_a) const
{
visitor_a.receive_block (*this);
}
bool nano::receive_block::operator== (nano::receive_block const & other_a) const
{
auto result (hashables.previous == other_a.hashables.previous && hashables.source == other_a.hashables.source && work == other_a.work && signature == other_a.signature);
return result;
}
void nano::receive_block::serialize (nano::stream & stream_a) const
{
write (stream_a, hashables.previous.bytes);
write (stream_a, hashables.source.bytes);
write (stream_a, signature.bytes);
write (stream_a, work);
}
bool nano::receive_block::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
read (stream_a, hashables.previous.bytes);
read (stream_a, hashables.source.bytes);
read (stream_a, signature.bytes);
read (stream_a, work);
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
void nano::receive_block::serialize_json (std::string & string_a, bool single_line) const
{
boost::property_tree::ptree tree;
serialize_json (tree);
std::stringstream ostream;
boost::property_tree::write_json (ostream, tree, !single_line);
string_a = ostream.str ();
}
void nano::receive_block::serialize_json (boost::property_tree::ptree & tree) const
{
tree.put ("type", "receive");
std::string previous;
hashables.previous.encode_hex (previous);
tree.put ("previous", previous);
std::string source;
hashables.source.encode_hex (source);
tree.put ("source", source);
std::string signature_l;
signature.encode_hex (signature_l);
tree.put ("work", nano::to_string_hex (work));
tree.put ("signature", signature_l);
}
bool nano::receive_block::deserialize_json (boost::property_tree::ptree const & tree_a)
{
auto error (false);
try
{
assert (tree_a.get<std::string> ("type") == "receive");
auto previous_l (tree_a.get<std::string> ("previous"));
auto source_l (tree_a.get<std::string> ("source"));
auto work_l (tree_a.get<std::string> ("work"));
auto signature_l (tree_a.get<std::string> ("signature"));
error = hashables.previous.decode_hex (previous_l);
if (!error)
{
error = hashables.source.decode_hex (source_l);
if (!error)
{
error = nano::from_string_hex (work_l, work);
if (!error)
{
error = signature.decode_hex (signature_l);
}
}
}
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
nano::receive_block::receive_block (nano::block_hash const & previous_a, nano::block_hash const & source_a, nano::raw_key const & prv_a, nano::public_key const & pub_a, uint64_t work_a) :
hashables (previous_a, source_a),
signature (nano::sign_message (prv_a, pub_a, hash ())),
work (work_a)
{
}
nano::receive_block::receive_block (bool & error_a, nano::stream & stream_a) :
hashables (error_a, stream_a)
{
if (!error_a)
{
try
{
nano::read (stream_a, signature);
nano::read (stream_a, work);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
nano::receive_block::receive_block (bool & error_a, boost::property_tree::ptree const & tree_a) :
hashables (error_a, tree_a)
{
if (!error_a)
{
try
{
auto signature_l (tree_a.get<std::string> ("signature"));
auto work_l (tree_a.get<std::string> ("work"));
error_a = signature.decode_hex (signature_l);
if (!error_a)
{
error_a = nano::from_string_hex (work_l, work);
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
}
void nano::receive_block::hash (blake2b_state & hash_a) const
{
hashables.hash (hash_a);
}
uint64_t nano::receive_block::block_work () const
{
return work;
}
void nano::receive_block::block_work_set (uint64_t work_a)
{
work = work_a;
}
bool nano::receive_block::operator== (nano::block const & other_a) const
{
return blocks_equal (*this, other_a);
}
bool nano::receive_block::valid_predecessor (nano::block const & block_a) const
{
bool result;
switch (block_a.type ())
{
case nano::block_type::send:
case nano::block_type::receive:
case nano::block_type::open:
case nano::block_type::change:
result = true;
break;
default:
result = false;
break;
}
return result;
}
nano::block_hash nano::receive_block::previous () const
{
return hashables.previous;
}
nano::block_hash nano::receive_block::source () const
{
return hashables.source;
}
nano::block_hash nano::receive_block::root () const
{
return hashables.previous;
}
nano::signature nano::receive_block::block_signature () const
{
return signature;
}
void nano::receive_block::signature_set (nano::uint512_union const & signature_a)
{
signature = signature_a;
}
nano::block_type nano::receive_block::type () const
{
return nano::block_type::receive;
}
nano::receive_hashables::receive_hashables (nano::block_hash const & previous_a, nano::block_hash const & source_a) :
previous (previous_a),
source (source_a)
{
}
nano::receive_hashables::receive_hashables (bool & error_a, nano::stream & stream_a)
{
try
{
nano::read (stream_a, previous.bytes);
nano::read (stream_a, source.bytes);
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
nano::receive_hashables::receive_hashables (bool & error_a, boost::property_tree::ptree const & tree_a)
{
try
{
auto previous_l (tree_a.get<std::string> ("previous"));
auto source_l (tree_a.get<std::string> ("source"));
error_a = previous.decode_hex (previous_l);
if (!error_a)
{
error_a = source.decode_hex (source_l);
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
}
void nano::receive_hashables::hash (blake2b_state & hash_a) const
{
blake2b_update (&hash_a, previous.bytes.data (), sizeof (previous.bytes));
blake2b_update (&hash_a, source.bytes.data (), sizeof (source.bytes));
}
std::shared_ptr<nano::block> nano::block_uniquer::unique (std::shared_ptr<nano::block> block_a)
{
auto result (block_a);
if (result != nullptr)
{
nano::uint256_union key (block_a->full_hash ());
std::lock_guard<std::mutex> lock (mutex);
auto & existing (blocks[key]);
if (auto block_l = existing.lock ())
{
result = block_l;
}
else
{
existing = block_a;
}
release_assert (std::numeric_limits<CryptoPP::word32>::max () > blocks.size ());
for (auto i (0); i < cleanup_count && !blocks.empty (); ++i)
{
auto random_offset (nano::random_pool::generate_word32 (0, static_cast<CryptoPP::word32> (blocks.size () - 1)));
auto existing (std::next (blocks.begin (), random_offset));
if (existing == blocks.end ())
{
existing = blocks.begin ();
}
if (existing != blocks.end ())
{
if (auto block_l = existing->second.lock ())
{
// Still live
}
else
{
blocks.erase (existing);
}
}
}
}
return result;
}
size_t nano::block_uniquer::size ()
{
std::lock_guard<std::mutex> lock (mutex);
return blocks.size ();
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_uniquer & block_uniquer, const std::string & name)
{
auto count = block_uniquer.size ();
auto sizeof_element = sizeof (block_uniquer::value_type);
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", count, sizeof_element }));
return composite;
}
}
| 1 | 15,908 | Since it is the same for the send/open/change/receive types, it could probably be a single common `nano::epoch nano::block::epoch () const` on the base class, with an override only for state_block (like nano::block::link (), account () and representative ()). | nanocurrency-nano-node | cpp |
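A minimal, self-contained sketch of the layout the reviewer suggests above, using stand-in names (epoch, block, state_block, epoch_value) rather than the project's real classes: the base class returns one shared default and only the state block overrides it, the same pattern the file already uses for link (), account () and representative (). In the actual codebase the method would simply be named epoch () inside namespace nano; the enum values and member below are illustrative only.

// Sketch only: stand-in types, not the real nano classes.
#include <iostream>

enum class epoch
{
	epoch_0,
	epoch_1
};

class block
{
public:
	virtual ~block () = default;
	// Common default shared by the send/open/change/receive block types.
	virtual epoch epoch_value () const
	{
		return epoch::epoch_0;
	}
};

class state_block : public block
{
public:
	explicit state_block (epoch epoch_a) :
	epoch_m (epoch_a)
	{
	}
	// Only the state block carries per-instance epoch information, so only it overrides.
	epoch epoch_value () const override
	{
		return epoch_m;
	}

private:
	epoch epoch_m; // hypothetical member holding the block's epoch
};

int main ()
{
	block legacy;
	state_block upgraded (epoch::epoch_1);
	// Prints "0 1": legacy types fall back to the base default, state blocks report their own value.
	std::cout << static_cast<int> (legacy.epoch_value ()) << ' ' << static_cast<int> (upgraded.epoch_value ()) << std::endl;
	return 0;
}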
@@ -1,5 +1,6 @@
// This is the API that JS files loaded from the webview can see
const webviewApiPromises_ = {};
+let cb_ = () => {};
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const webviewApi = { | 1 | // This is the API that JS files loaded from the webview can see
const webviewApiPromises_ = {};
// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
const webviewApi = {
postMessage: function(message) {
const messageId = `userWebview_${Date.now()}${Math.random()}`;
const promise = new Promise((resolve, reject) => {
webviewApiPromises_[messageId] = { resolve, reject };
});
window.postMessage({
target: 'postMessageService.message',
message: {
from: 'userWebview',
to: 'plugin',
id: messageId,
content: message,
},
});
return promise;
},
};
(function() {
function docReady(fn) {
if (document.readyState === 'complete' || document.readyState === 'interactive') {
setTimeout(fn, 1);
} else {
document.addEventListener('DOMContentLoaded', fn);
}
}
function fileExtension(path) {
if (!path) throw new Error('Path is empty');
const output = path.split('.');
if (output.length <= 1) return '';
return output[output.length - 1];
}
docReady(() => {
const rootElement = document.createElement('div');
document.getElementsByTagName('body')[0].appendChild(rootElement);
const contentElement = document.createElement('div');
contentElement.setAttribute('id', 'joplin-plugin-content');
rootElement.appendChild(contentElement);
const headElement = document.getElementsByTagName('head')[0];
const addedScripts = {};
function addScript(scriptPath, id = null) {
const ext = fileExtension(scriptPath).toLowerCase();
if (ext === 'js') {
const script = document.createElement('script');
script.src = scriptPath;
if (id) script.id = id;
headElement.appendChild(script);
} else if (ext === 'css') {
const link = document.createElement('link');
link.rel = 'stylesheet';
link.href = scriptPath;
if (id) link.id = id;
headElement.appendChild(link);
} else {
throw new Error(`Unsupported script: ${scriptPath}`);
}
}
const ipc = {
setHtml: (args) => {
contentElement.innerHTML = args.html;
// console.debug('UserWebviewIndex: setting html to', args.html);
window.requestAnimationFrame(() => {
console.debug('UserWebviewIndex: setting html callback', args.hash);
window.postMessage({ target: 'UserWebview', message: 'htmlIsSet', hash: args.hash }, '*');
});
},
setScript: (args) => {
const { script, key } = args;
const scriptPath = `file://${script}`;
const elementId = `joplin-script-${key}`;
if (addedScripts[elementId]) {
document.getElementById(elementId).remove();
delete addedScripts[elementId];
}
addScript(scriptPath, elementId);
},
setScripts: (args) => {
const scripts = args.scripts;
if (!scripts) return;
for (let i = 0; i < scripts.length; i++) {
const scriptPath = `file://${scripts[i]}`;
if (addedScripts[scriptPath]) continue;
addedScripts[scriptPath] = true;
addScript(scriptPath);
}
},
'postMessageService.response': (event) => {
const message = event.message;
const promise = webviewApiPromises_[message.responseId];
if (!promise) {
console.warn('postMessageService.response: could not find callback for message', message);
return;
}
if (message.error) {
promise.reject(message.error);
} else {
promise.resolve(message.response);
}
},
};
window.addEventListener('message', ((event) => {
if (!event.data || event.data.target !== 'webview') return;
const callName = event.data.name;
const args = event.data.args;
if (!ipc[callName]) {
console.warn('Missing IPC function:', event.data);
} else {
console.debug('UserWebviewIndex: Got message', callName, args);
ipc[callName](args);
}
}));
// Send a message to the containing component to notify it that the
// view content is fully ready.
//
// Need to send it with a delay to make sure all listeners are
// ready when the message is sent.
window.requestAnimationFrame(() => {
console.debug('UserWebViewIndex: calling isReady');
window.postMessage({ target: 'UserWebview', message: 'ready' }, '*');
});
});
})();
| 1 | 18,374 | Please give it a more descriptive name and add a comment to explain what it does. | laurent22-joplin | js |
@@ -77,7 +77,8 @@ public class UserPreferences {
// Network
private static final String PREF_ENQUEUE_DOWNLOADED = "prefEnqueueDownloaded";
public static final String PREF_UPDATE_INTERVAL = "prefAutoUpdateIntervall";
- private static final String PREF_MOBILE_UPDATE = "prefMobileUpdate";
+ public static final String PREF_MOBILE_UPDATE_OLD = "prefMobileUpdate";
+ public static final String PREF_MOBILE_UPDATE = "prefMobileUpdateAllowed";
public static final String PREF_EPISODE_CLEANUP = "prefEpisodeCleanup";
public static final String PREF_PARALLEL_DOWNLOADS = "prefParallelDownloads";
public static final String PREF_EPISODE_CACHE_SIZE = "prefEpisodeCacheSize"; | 1 | package de.danoeh.antennapod.core.preferences;
import android.content.Context;
import android.content.SharedPreferences;
import android.preference.PreferenceManager;
import android.support.annotation.IntRange;
import android.support.annotation.NonNull;
import android.support.v4.app.NotificationCompat;
import android.text.TextUtils;
import android.util.Log;
import org.json.JSONArray;
import org.json.JSONException;
import java.io.File;
import java.io.IOException;
import java.net.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.service.download.ProxyConfig;
import de.danoeh.antennapod.core.storage.APCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APNullCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APQueueCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.EpisodeCleanupAlgorithm;
import de.danoeh.antennapod.core.util.Converter;
import de.danoeh.antennapod.core.util.download.AutoUpdateManager;
/**
* Provides access to preferences set by the user in the settings screen. A
* private instance of this class must first be instantiated via
* init() or otherwise every public method will throw an Exception
* when called.
*/
public class UserPreferences {
private UserPreferences(){}
private static final String IMPORT_DIR = "import/";
private static final String TAG = "UserPreferences";
// User Interface
public static final String PREF_THEME = "prefTheme";
public static final String PREF_HIDDEN_DRAWER_ITEMS = "prefHiddenDrawerItems";
private static final String PREF_DRAWER_FEED_ORDER = "prefDrawerFeedOrder";
private static final String PREF_DRAWER_FEED_COUNTER = "prefDrawerFeedIndicator";
public static final String PREF_EXPANDED_NOTIFICATION = "prefExpandNotify";
private static final String PREF_PERSISTENT_NOTIFICATION = "prefPersistNotify";
public static final String PREF_COMPACT_NOTIFICATION_BUTTONS = "prefCompactNotificationButtons";
public static final String PREF_LOCKSCREEN_BACKGROUND = "prefLockscreenBackground";
private static final String PREF_SHOW_DOWNLOAD_REPORT = "prefShowDownloadReport";
public static final String PREF_BACK_BUTTON_BEHAVIOR = "prefBackButtonBehavior";
private static final String PREF_BACK_BUTTON_GO_TO_PAGE = "prefBackButtonGoToPage";
// Queue
private static final String PREF_QUEUE_ADD_TO_FRONT = "prefQueueAddToFront";
// Playback
public static final String PREF_PAUSE_ON_HEADSET_DISCONNECT = "prefPauseOnHeadsetDisconnect";
public static final String PREF_UNPAUSE_ON_HEADSET_RECONNECT = "prefUnpauseOnHeadsetReconnect";
private static final String PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT = "prefUnpauseOnBluetoothReconnect";
private static final String PREF_HARDWARE_FOWARD_BUTTON_SKIPS = "prefHardwareForwardButtonSkips";
private static final String PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS = "prefHardwarePreviousButtonRestarts";
public static final String PREF_FOLLOW_QUEUE = "prefFollowQueue";
private static final String PREF_SKIP_KEEPS_EPISODE = "prefSkipKeepsEpisode";
private static final String PREF_FAVORITE_KEEPS_EPISODE = "prefFavoriteKeepsEpisode";
private static final String PREF_AUTO_DELETE = "prefAutoDelete";
public static final String PREF_SMART_MARK_AS_PLAYED_SECS = "prefSmartMarkAsPlayedSecs";
private static final String PREF_PLAYBACK_SPEED_ARRAY = "prefPlaybackSpeedArray";
private static final String PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS = "prefPauseForFocusLoss";
private static final String PREF_RESUME_AFTER_CALL = "prefResumeAfterCall";
public static final String PREF_VIDEO_BEHAVIOR = "prefVideoBehavior";
// Network
private static final String PREF_ENQUEUE_DOWNLOADED = "prefEnqueueDownloaded";
public static final String PREF_UPDATE_INTERVAL = "prefAutoUpdateIntervall";
private static final String PREF_MOBILE_UPDATE = "prefMobileUpdate";
public static final String PREF_EPISODE_CLEANUP = "prefEpisodeCleanup";
public static final String PREF_PARALLEL_DOWNLOADS = "prefParallelDownloads";
public static final String PREF_EPISODE_CACHE_SIZE = "prefEpisodeCacheSize";
public static final String PREF_ENABLE_AUTODL = "prefEnableAutoDl";
public static final String PREF_ENABLE_AUTODL_ON_BATTERY = "prefEnableAutoDownloadOnBattery";
public static final String PREF_ENABLE_AUTODL_WIFI_FILTER = "prefEnableAutoDownloadWifiFilter";
public static final String PREF_ENABLE_AUTODL_ON_MOBILE = "prefEnableAutoDownloadOnMobile";
private static final String PREF_AUTODL_SELECTED_NETWORKS = "prefAutodownloadSelectedNetworks";
private static final String PREF_PROXY_TYPE = "prefProxyType";
private static final String PREF_PROXY_HOST = "prefProxyHost";
private static final String PREF_PROXY_PORT = "prefProxyPort";
private static final String PREF_PROXY_USER = "prefProxyUser";
private static final String PREF_PROXY_PASSWORD = "prefProxyPassword";
// Services
private static final String PREF_AUTO_FLATTR = "pref_auto_flattr";
private static final String PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD = "prefAutoFlattrPlayedDurationThreshold";
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications";
// Other
private static final String PREF_DATA_FOLDER = "prefDataFolder";
public static final String PREF_IMAGE_CACHE_SIZE = "prefImageCacheSize";
public static final String PREF_DELETE_REMOVES_FROM_QUEUE = "prefDeleteRemovesFromQueue";
// Mediaplayer
public static final String PREF_MEDIA_PLAYER = "prefMediaPlayer";
public static final String PREF_MEDIA_PLAYER_EXOPLAYER = "exoplayer";
private static final String PREF_PLAYBACK_SPEED = "prefPlaybackSpeed";
public static final String PREF_PLAYBACK_SKIP_SILENCE = "prefSkipSilence";
private static final String PREF_FAST_FORWARD_SECS = "prefFastForwardSecs";
private static final String PREF_REWIND_SECS = "prefRewindSecs";
private static final String PREF_QUEUE_LOCKED = "prefQueueLocked";
private static final String IMAGE_CACHE_DEFAULT_VALUE = "100";
private static final int IMAGE_CACHE_SIZE_MINIMUM = 20;
private static final String PREF_LEFT_VOLUME = "prefLeftVolume";
private static final String PREF_RIGHT_VOLUME = "prefRightVolume";
// Experimental
private static final String PREF_STEREO_TO_MONO = "PrefStereoToMono";
public static final String PREF_CAST_ENABLED = "prefCast"; //Used for enabling Chromecast support
public static final int EPISODE_CLEANUP_QUEUE = -1;
public static final int EPISODE_CLEANUP_NULL = -2;
public static final int EPISODE_CLEANUP_DEFAULT = 0;
// Constants
private static final int NOTIFICATION_BUTTON_REWIND = 0;
private static final int NOTIFICATION_BUTTON_FAST_FORWARD = 1;
private static final int NOTIFICATION_BUTTON_SKIP = 2;
private static final int EPISODE_CACHE_SIZE_UNLIMITED = -1;
public static final int FEED_ORDER_COUNTER = 0;
public static final int FEED_ORDER_ALPHABETICAL = 1;
public static final int FEED_ORDER_MOST_PLAYED = 3;
public static final int FEED_COUNTER_SHOW_NEW_UNPLAYED_SUM = 0;
public static final int FEED_COUNTER_SHOW_NEW = 1;
public static final int FEED_COUNTER_SHOW_UNPLAYED = 2;
public static final int FEED_COUNTER_SHOW_NONE = 3;
public static final int FEED_COUNTER_SHOW_DOWNLOADED = 4;
private static Context context;
private static SharedPreferences prefs;
/**
* Sets up the UserPreferences class.
*
* @throws IllegalArgumentException if context is null
*/
public static void init(@NonNull Context context) {
Log.d(TAG, "Creating new instance of UserPreferences");
UserPreferences.context = context.getApplicationContext();
UserPreferences.prefs = PreferenceManager.getDefaultSharedPreferences(context);
createImportDirectory();
createNoMediaFile();
}
/**
* Returns theme as R.style value
*
* @return R.style.Theme_AntennaPod_Light or R.style.Theme_AntennaPod_Dark
*/
public static int getTheme() {
return readThemeValue(prefs.getString(PREF_THEME, "0"));
}
public static int getNoTitleTheme() {
int theme = getTheme();
if (theme == R.style.Theme_AntennaPod_Dark) {
return R.style.Theme_AntennaPod_Dark_NoTitle;
} else if (theme == R.style.Theme_AntennaPod_TrueBlack) {
return R.style.Theme_AntennaPod_TrueBlack_NoTitle;
} else {
return R.style.Theme_AntennaPod_Light_NoTitle;
}
}
public static List<String> getHiddenDrawerItems() {
String hiddenItems = prefs.getString(PREF_HIDDEN_DRAWER_ITEMS, "");
return new ArrayList<>(Arrays.asList(TextUtils.split(hiddenItems, ",")));
}
public static List<Integer> getCompactNotificationButtons() {
String[] buttons = TextUtils.split(
prefs.getString(PREF_COMPACT_NOTIFICATION_BUTTONS,
String.valueOf(NOTIFICATION_BUTTON_SKIP)),
",");
List<Integer> notificationButtons = new ArrayList<>();
for (String button : buttons) {
notificationButtons.add(Integer.parseInt(button));
}
return notificationButtons;
}
/**
* Helper function to return whether the specified button should be shown on compact
* notifications.
*
* @param buttonId Either NOTIFICATION_BUTTON_REWIND, NOTIFICATION_BUTTON_FAST_FORWARD or
* NOTIFICATION_BUTTON_SKIP.
* @return {@code true} if button should be shown, {@code false} otherwise
*/
private static boolean showButtonOnCompactNotification(int buttonId) {
return getCompactNotificationButtons().contains(buttonId);
}
public static boolean showRewindOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_REWIND);
}
public static boolean showFastForwardOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_FAST_FORWARD);
}
public static boolean showSkipOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_SKIP);
}
public static int getFeedOrder() {
String value = prefs.getString(PREF_DRAWER_FEED_ORDER, "0");
return Integer.parseInt(value);
}
public static int getFeedCounterSetting() {
String value = prefs.getString(PREF_DRAWER_FEED_COUNTER, "0");
return Integer.parseInt(value);
}
/**
* Returns notification priority.
*
* @return NotificationCompat.PRIORITY_MAX or NotificationCompat.PRIORITY_DEFAULT
*/
public static int getNotifyPriority() {
if (prefs.getBoolean(PREF_EXPANDED_NOTIFICATION, false)) {
return NotificationCompat.PRIORITY_MAX;
} else {
return NotificationCompat.PRIORITY_DEFAULT;
}
}
/**
* Returns true if notifications are persistent
*
* @return {@code true} if notifications are persistent, {@code false} otherwise
*/
public static boolean isPersistNotify() {
return prefs.getBoolean(PREF_PERSISTENT_NOTIFICATION, true);
}
/**
* Returns true if the lockscreen background should be set to the current episode's image
*
* @return {@code true} if the lockscreen background should be set, {@code false} otherwise
*/
public static boolean setLockscreenBackground() {
return prefs.getBoolean(PREF_LOCKSCREEN_BACKGROUND, true);
}
/**
* Returns true if download reports are shown
*
* @return {@code true} if download reports are shown, {@code false} otherwise
*/
public static boolean showDownloadReport() {
return prefs.getBoolean(PREF_SHOW_DOWNLOAD_REPORT, true);
}
public static boolean enqueueDownloadedEpisodes() {
return prefs.getBoolean(PREF_ENQUEUE_DOWNLOADED, true);
}
public static boolean enqueueAtFront() {
return prefs.getBoolean(PREF_QUEUE_ADD_TO_FRONT, false);
}
public static boolean isPauseOnHeadsetDisconnect() {
return prefs.getBoolean(PREF_PAUSE_ON_HEADSET_DISCONNECT, true);
}
public static boolean isUnpauseOnHeadsetReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_HEADSET_RECONNECT, true);
}
public static boolean isUnpauseOnBluetoothReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT, false);
}
public static boolean shouldHardwareButtonSkip() {
return prefs.getBoolean(PREF_HARDWARE_FOWARD_BUTTON_SKIPS, false);
}
public static boolean shouldHardwarePreviousButtonRestart() {
return prefs.getBoolean(PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS, false);
}
public static boolean isFollowQueue() {
return prefs.getBoolean(PREF_FOLLOW_QUEUE, true);
}
public static boolean shouldSkipKeepEpisode() { return prefs.getBoolean(PREF_SKIP_KEEPS_EPISODE, true); }
public static boolean shouldFavoriteKeepEpisode() {
return prefs.getBoolean(PREF_FAVORITE_KEEPS_EPISODE, true);
}
public static boolean isAutoDelete() {
return prefs.getBoolean(PREF_AUTO_DELETE, false);
}
public static int getSmartMarkAsPlayedSecs() {
return Integer.parseInt(prefs.getString(PREF_SMART_MARK_AS_PLAYED_SECS, "30"));
}
public static boolean shouldDeleteRemoveFromQueue() {
return prefs.getBoolean(PREF_DELETE_REMOVES_FROM_QUEUE, false);
}
public static boolean isAutoFlattr() {
return prefs.getBoolean(PREF_AUTO_FLATTR, false);
}
public static String getPlaybackSpeed() {
return prefs.getString(PREF_PLAYBACK_SPEED, "1.00");
}
public static boolean isSkipSilence() {
return prefs.getBoolean(PREF_PLAYBACK_SKIP_SILENCE, false);
}
public static String[] getPlaybackSpeedArray() {
return readPlaybackSpeedArray(prefs.getString(PREF_PLAYBACK_SPEED_ARRAY, null));
}
public static float getLeftVolume() {
int volume = prefs.getInt(PREF_LEFT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static float getRightVolume() {
int volume = prefs.getInt(PREF_RIGHT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static int getLeftVolumePercentage() {
return prefs.getInt(PREF_LEFT_VOLUME, 100);
}
public static int getRightVolumePercentage() {
return prefs.getInt(PREF_RIGHT_VOLUME, 100);
}
public static boolean shouldPauseForFocusLoss() {
return prefs.getBoolean(PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS, false);
}
/*
* Returns update interval in milliseconds; value 0 means that auto update is disabled
* or feeds are updated at a certain time of day
*/
public static long getUpdateInterval() {
String updateInterval = prefs.getString(PREF_UPDATE_INTERVAL, "0");
if(!updateInterval.contains(":")) {
return readUpdateInterval(updateInterval);
} else {
return 0;
}
}
public static int[] getUpdateTimeOfDay() {
String datetime = prefs.getString(PREF_UPDATE_INTERVAL, "");
if(datetime.length() >= 3 && datetime.contains(":")) {
String[] parts = datetime.split(":");
int hourOfDay = Integer.parseInt(parts[0]);
int minute = Integer.parseInt(parts[1]);
return new int[] { hourOfDay, minute };
} else {
return new int[0];
}
}
public static boolean isAllowMobileUpdate() {
return prefs.getBoolean(PREF_MOBILE_UPDATE, false);
}
public static int getParallelDownloads() {
return Integer.parseInt(prefs.getString(PREF_PARALLEL_DOWNLOADS, "4"));
}
public static int getEpisodeCacheSizeUnlimited() {
return context.getResources().getInteger(R.integer.episode_cache_size_unlimited);
}
/**
* Returns the capacity of the episode cache. This method will return the
* negative integer EPISODE_CACHE_SIZE_UNLIMITED if the cache size is set to
* 'unlimited'.
*/
public static int getEpisodeCacheSize() {
return readEpisodeCacheSizeInternal(prefs.getString(PREF_EPISODE_CACHE_SIZE, "20"));
}
public static boolean isEnableAutodownload() {
return prefs.getBoolean(PREF_ENABLE_AUTODL, false);
}
public static boolean isEnableAutodownloadOnBattery() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_BATTERY, true);
}
public static boolean isEnableAutodownloadWifiFilter() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_WIFI_FILTER, false);
}
public static boolean isEnableAutodownloadOnMobile() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_MOBILE, false);
}
public static int getImageCacheSize() {
String cacheSizeString = prefs.getString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE);
int cacheSizeInt = Integer.parseInt(cacheSizeString);
// if the cache size is too small the user won't get any images at all
// that's bad, force it back to the default.
if (cacheSizeInt < IMAGE_CACHE_SIZE_MINIMUM) {
prefs.edit().putString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE).apply();
cacheSizeInt = Integer.parseInt(IMAGE_CACHE_DEFAULT_VALUE);
}
int cacheSizeMB = cacheSizeInt * 1024 * 1024;
return cacheSizeMB;
}
public static int getFastForwardSecs() {
return prefs.getInt(PREF_FAST_FORWARD_SECS, 30);
}
public static int getRewindSecs() {
return prefs.getInt(PREF_REWIND_SECS, 30);
}
/**
* Returns the time after which an episode should be auto-flattr'd in percent of the episode's
* duration.
*/
public static float getAutoFlattrPlayedDurationThreshold() {
return prefs.getFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, 0.8f);
}
public static String[] getAutodownloadSelectedNetworks() {
String selectedNetWorks = prefs.getString(PREF_AUTODL_SELECTED_NETWORKS, "");
return TextUtils.split(selectedNetWorks, ",");
}
public static void setProxyConfig(ProxyConfig config) {
SharedPreferences.Editor editor = prefs.edit();
editor.putString(PREF_PROXY_TYPE, config.type.name());
if(TextUtils.isEmpty(config.host)) {
editor.remove(PREF_PROXY_HOST);
} else {
editor.putString(PREF_PROXY_HOST, config.host);
}
if(config.port <= 0 || config.port > 65535) {
editor.remove(PREF_PROXY_PORT);
} else {
editor.putInt(PREF_PROXY_PORT, config.port);
}
if(TextUtils.isEmpty(config.username)) {
editor.remove(PREF_PROXY_USER);
} else {
editor.putString(PREF_PROXY_USER, config.username);
}
if(TextUtils.isEmpty(config.password)) {
editor.remove(PREF_PROXY_PASSWORD);
} else {
editor.putString(PREF_PROXY_PASSWORD, config.password);
}
editor.apply();
}
public static ProxyConfig getProxyConfig() {
Proxy.Type type = Proxy.Type.valueOf(prefs.getString(PREF_PROXY_TYPE, Proxy.Type.DIRECT.name()));
String host = prefs.getString(PREF_PROXY_HOST, null);
int port = prefs.getInt(PREF_PROXY_PORT, 0);
String username = prefs.getString(PREF_PROXY_USER, null);
String password = prefs.getString(PREF_PROXY_PASSWORD, null);
return new ProxyConfig(type, host, port, username, password);
}
public static boolean shouldResumeAfterCall() {
return prefs.getBoolean(PREF_RESUME_AFTER_CALL, true);
}
public static boolean isQueueLocked() {
return prefs.getBoolean(PREF_QUEUE_LOCKED, false);
}
public static void setFastForwardSecs(int secs) {
prefs.edit()
.putInt(PREF_FAST_FORWARD_SECS, secs)
.apply();
}
public static void setRewindSecs(int secs) {
prefs.edit()
.putInt(PREF_REWIND_SECS, secs)
.apply();
}
public static void setPlaybackSpeed(String speed) {
prefs.edit()
.putString(PREF_PLAYBACK_SPEED, speed)
.apply();
}
public static void setSkipSilence(boolean skipSilence) {
prefs.edit()
.putBoolean(PREF_PLAYBACK_SKIP_SILENCE, skipSilence)
.apply();
}
public static void setPlaybackSpeedArray(String[] speeds) {
JSONArray jsonArray = new JSONArray();
for (String speed : speeds) {
jsonArray.put(speed);
}
prefs.edit()
.putString(PREF_PLAYBACK_SPEED_ARRAY, jsonArray.toString())
.apply();
}
public static void setVolume(@IntRange(from = 0, to = 100) int leftVolume,
@IntRange(from = 0, to = 100) int rightVolume) {
prefs.edit()
.putInt(PREF_LEFT_VOLUME, leftVolume)
.putInt(PREF_RIGHT_VOLUME, rightVolume)
.apply();
}
public static void setAutodownloadSelectedNetworks(String[] value) {
prefs.edit()
.putString(PREF_AUTODL_SELECTED_NETWORKS, TextUtils.join(",", value))
.apply();
}
/**
* Sets the update interval value.
*/
public static void setUpdateInterval(long hours) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, String.valueOf(hours))
.apply();
// when updating with an interval, we assume the user wants
// to update *now* and then every 'hours' interval thereafter.
restartUpdateAlarm(true);
}
/**
* Sets the update interval value.
*/
public static void setUpdateTimeOfDay(int hourOfDay, int minute) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, hourOfDay + ":" + minute)
.apply();
restartUpdateAlarm(false);
}
/**
* Change the auto-flattr settings
*
* @param enabled Whether automatic flattring should be enabled at all
* @param autoFlattrThreshold The percentage of playback time after which an episode should be
* flattrd. Must be a value between 0 and 1 (inclusive)
* */
    public static void setAutoFlattrSettings(boolean enabled, float autoFlattrThreshold) {
if(autoFlattrThreshold < 0.0 || autoFlattrThreshold > 1.0) {
throw new IllegalArgumentException("Flattr threshold must be in range [0.0, 1.0]");
}
prefs.edit()
.putBoolean(PREF_AUTO_FLATTR, enabled)
.putFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, autoFlattrThreshold)
.apply();
}
public static boolean gpodnetNotificationsEnabled() {
return prefs.getBoolean(PREF_GPODNET_NOTIFICATIONS, true);
}
public static void setGpodnetNotificationsEnabled() {
prefs.edit()
.putBoolean(PREF_GPODNET_NOTIFICATIONS, true)
.apply();
}
public static void setHiddenDrawerItems(List<String> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_HIDDEN_DRAWER_ITEMS, str)
.apply();
}
public static void setCompactNotificationButtons(List<Integer> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_COMPACT_NOTIFICATION_BUTTONS, str)
.apply();
}
public static void setQueueLocked(boolean locked) {
prefs.edit()
.putBoolean(PREF_QUEUE_LOCKED, locked)
.apply();
}
private static int readThemeValue(String valueFromPrefs) {
switch (Integer.parseInt(valueFromPrefs)) {
case 0:
return R.style.Theme_AntennaPod_Light;
case 1:
return R.style.Theme_AntennaPod_Dark;
case 2:
return R.style.Theme_AntennaPod_TrueBlack;
default:
return R.style.Theme_AntennaPod_Light;
}
}
private static long readUpdateInterval(String valueFromPrefs) {
int hours = Integer.parseInt(valueFromPrefs);
return TimeUnit.HOURS.toMillis(hours);
}
private static int readEpisodeCacheSizeInternal(String valueFromPrefs) {
if (valueFromPrefs.equals(context.getString(R.string.pref_episode_cache_unlimited))) {
return EPISODE_CACHE_SIZE_UNLIMITED;
} else {
return Integer.parseInt(valueFromPrefs);
}
}
private static String[] readPlaybackSpeedArray(String valueFromPrefs) {
String[] selectedSpeeds = null;
// If this preference hasn't been set yet, return the default options
if (valueFromPrefs == null) {
selectedSpeeds = new String[] { "1.00", "1.25", "1.50", "1.75", "2.00" };
} else {
try {
JSONArray jsonArray = new JSONArray(valueFromPrefs);
selectedSpeeds = new String[jsonArray.length()];
for (int i = 0; i < jsonArray.length(); i++) {
selectedSpeeds[i] = jsonArray.getString(i);
}
} catch (JSONException e) {
Log.e(TAG, "Got JSON error when trying to get speeds from JSONArray");
e.printStackTrace();
}
}
return selectedSpeeds;
}
public static boolean useSonic() {
return prefs.getString(PREF_MEDIA_PLAYER, "sonic").equals("sonic");
}
public static boolean useExoplayer() {
return prefs.getString(PREF_MEDIA_PLAYER, "sonic").equals(PREF_MEDIA_PLAYER_EXOPLAYER);
}
public static void enableSonic() {
prefs.edit().putString(PREF_MEDIA_PLAYER, "sonic").apply();
}
public static boolean stereoToMono() {
return prefs.getBoolean(PREF_STEREO_TO_MONO, false);
}
public static void stereoToMono(boolean enable) {
prefs.edit()
.putBoolean(PREF_STEREO_TO_MONO, enable)
.apply();
}
public static VideoBackgroundBehavior getVideoBackgroundBehavior() {
switch (prefs.getString(PREF_VIDEO_BEHAVIOR, "stop")) {
case "stop": return VideoBackgroundBehavior.STOP;
case "pip": return VideoBackgroundBehavior.PICTURE_IN_PICTURE;
case "continue": return VideoBackgroundBehavior.CONTINUE_PLAYING;
default: return VideoBackgroundBehavior.STOP;
}
}
public static EpisodeCleanupAlgorithm getEpisodeCleanupAlgorithm() {
int cleanupValue = getEpisodeCleanupValue();
if (cleanupValue == EPISODE_CLEANUP_QUEUE) {
return new APQueueCleanupAlgorithm();
} else if (cleanupValue == EPISODE_CLEANUP_NULL) {
return new APNullCleanupAlgorithm();
} else {
return new APCleanupAlgorithm(cleanupValue);
}
}
public static int getEpisodeCleanupValue() {
return Integer.parseInt(prefs.getString(PREF_EPISODE_CLEANUP, "-1"));
}
public static void setEpisodeCleanupValue(int episodeCleanupValue) {
prefs.edit()
.putString(PREF_EPISODE_CLEANUP, Integer.toString(episodeCleanupValue))
.apply();
}
/**
* Return the folder where the app stores all of its data. This method will
* return the standard data folder if none has been set by the user.
*
* @param type The name of the folder inside the data folder. May be null
* when accessing the root of the data folder.
* @return The data folder that has been requested or null if the folder
* could not be created.
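     *
     * <p>Illustrative example (inferred from the implementation below, not original
     * documentation): {@code getDataFolder("import")} resolves to an "import" subdirectory
     * of the configured data folder (or of the default external files directory), creating
     * it when the parent directory is writable.</p>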
*/
public static File getDataFolder(String type) {
String strDir = prefs.getString(PREF_DATA_FOLDER, null);
if (strDir == null) {
Log.d(TAG, "Using default data folder");
return context.getExternalFilesDir(type);
} else {
File dataDir = new File(strDir);
if (!dataDir.exists()) {
if (!dataDir.mkdir()) {
Log.w(TAG, "Could not create data folder");
return null;
}
}
if (type == null) {
return dataDir;
} else {
// handle path separators
String[] dirs = type.split("/");
for (int i = 0; i < dirs.length; i++) {
if (dirs.length > 0) {
if (i < dirs.length - 1) {
dataDir = getDataFolder(dirs[i]);
if (dataDir == null) {
return null;
}
}
type = dirs[i];
}
}
File typeDir = new File(dataDir, type);
if (!typeDir.exists()) {
if (dataDir.canWrite()) {
if (!typeDir.mkdir()) {
Log.e(TAG, "Could not create data folder named " + type);
return null;
}
}
}
return typeDir;
}
}
}
public static void setDataFolder(String dir) {
Log.d(TAG, "setDataFolder(dir: " + dir + ")");
prefs.edit()
.putString(PREF_DATA_FOLDER, dir)
.apply();
createImportDirectory();
}
/**
* Create a .nomedia file to prevent scanning by the media scanner.
*/
private static void createNoMediaFile() {
File f = new File(context.getExternalFilesDir(null), ".nomedia");
if (!f.exists()) {
try {
f.createNewFile();
} catch (IOException e) {
Log.e(TAG, "Could not create .nomedia file");
e.printStackTrace();
}
Log.d(TAG, ".nomedia file created");
}
}
/**
* Creates the import directory if it doesn't exist and if storage is
* available
*/
private static void createImportDirectory() {
File importDir = getDataFolder(IMPORT_DIR);
if (importDir != null) {
if (importDir.exists()) {
Log.d(TAG, "Import directory already exists");
} else {
Log.d(TAG, "Creating import directory");
importDir.mkdir();
}
} else {
Log.d(TAG, "Could not access external storage.");
}
}
public static void restartUpdateAlarm(boolean now) {
int[] timeOfDay = getUpdateTimeOfDay();
Log.d(TAG, "timeOfDay: " + Arrays.toString(timeOfDay));
if (timeOfDay.length == 2) {
AutoUpdateManager.restartUpdateTimeOfDayAlarm(context, timeOfDay[0], timeOfDay[1]);
} else {
long milliseconds = getUpdateInterval();
long startTrigger = milliseconds;
if (now) {
startTrigger = TimeUnit.SECONDS.toMillis(10);
}
AutoUpdateManager.restartUpdateIntervalAlarm(context, startTrigger, milliseconds);
}
}
/**
* Reads episode cache size as it is saved in the episode_cache_size_values array.
*/
public static int readEpisodeCacheSize(String valueFromPrefs) {
return readEpisodeCacheSizeInternal(valueFromPrefs);
}
/**
     * Evaluates whether Cast support (Chromecast, Audio Cast, etc.) is enabled in the preferences.
*/
public static boolean isCastEnabled() {
return prefs.getBoolean(PREF_CAST_ENABLED, false);
}
public enum VideoBackgroundBehavior {
STOP, PICTURE_IN_PICTURE, CONTINUE_PLAYING
}
public enum BackButtonBehavior {
DEFAULT, OPEN_DRAWER, DOUBLE_TAP, SHOW_PROMPT, GO_TO_PAGE
}
public static BackButtonBehavior getBackButtonBehavior() {
switch (prefs.getString(PREF_BACK_BUTTON_BEHAVIOR, "default")) {
case "default": return BackButtonBehavior.DEFAULT;
case "drawer": return BackButtonBehavior.OPEN_DRAWER;
case "doubletap": return BackButtonBehavior.DOUBLE_TAP;
case "prompt": return BackButtonBehavior.SHOW_PROMPT;
case "page": return BackButtonBehavior.GO_TO_PAGE;
default: return BackButtonBehavior.DEFAULT;
}
}
public static String getBackButtonGoToPage() {
return prefs.getString(PREF_BACK_BUTTON_GO_TO_PAGE, "QueueFragment");
}
public static void setBackButtonGoToPage(String tag) {
prefs.edit()
.putString(PREF_BACK_BUTTON_GO_TO_PAGE, tag)
.apply();
}
}
| 1 | 14,657 | we can never get rid of this... | AntennaPod-AntennaPod | java |
@@ -28,8 +28,9 @@ var (
[]string{"controller", "method", "resource", "remote", "status"},
)
metricKubeClientRequestSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Name: "hive_kube_client_request_seconds",
- Help: "Length of time for kubernetes client requests.",
+ Name: "hive_kube_client_request_seconds",
+ Help: "Length of time for kubernetes client requests.",
+ Buckets: []float64{0.05, 0.1, 0.5, 1, 5, 10, 30, 60, 120},
},
[]string{"controller", "method", "resource", "remote", "status"},
) | 1 | package utils
import (
"fmt"
"net/http"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/metrics"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)
var (
metricKubeClientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_kube_client_requests_total",
Help: "Counter incremented for each kube client request.",
},
[]string{"controller", "method", "resource", "remote", "status"},
)
metricKubeClientRequestSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "hive_kube_client_request_seconds",
Help: "Length of time for kubernetes client requests.",
},
[]string{"controller", "method", "resource", "remote", "status"},
)
)
func init() {
metrics.Registry.MustRegister(metricKubeClientRequests)
metrics.Registry.MustRegister(metricKubeClientRequestSeconds)
}
// NewClientWithMetricsOrDie creates a new controller-runtime client with a wrapper which increments
// metrics for requests by controller name, HTTP method, URL path, and whether or not the request was
// to a remote cluster. The client will re-use the manager's cache. This should be used in
// all Hive controllers.
func NewClientWithMetricsOrDie(mgr manager.Manager, ctrlrName hivev1.ControllerName, rateLimiter *flowcontrol.RateLimiter) client.Client {
// Copy the rest config as we want our round trippers to be controller specific.
cfg := rest.CopyConfig(mgr.GetConfig())
if rateLimiter != nil {
cfg.RateLimiter = *rateLimiter
}
AddControllerMetricsTransportWrapper(cfg, ctrlrName, false)
options := client.Options{
Scheme: mgr.GetScheme(),
Mapper: mgr.GetRESTMapper(),
}
c, err := client.New(cfg, options)
if err != nil {
log.WithError(err).Fatal("unable to initialize metrics wrapped client")
}
return &client.DelegatingClient{
Reader: &client.DelegatingReader{
CacheReader: mgr.GetCache(),
ClientReader: c,
},
Writer: c,
StatusClient: c,
}
}
// AddControllerMetricsTransportWrapper adds a transport wrapper to the given rest config which
// exposes metrics based on the requests being made.
func AddControllerMetricsTransportWrapper(cfg *rest.Config, controllerName hivev1.ControllerName, remote bool) {
// If the restConfig already has a transport wrapper, wrap it.
if cfg.WrapTransport != nil {
origFunc := cfg.WrapTransport
cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
return &ControllerMetricsTripper{
RoundTripper: origFunc(rt),
Controller: controllerName,
Remote: remote,
}
}
}
cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
return &ControllerMetricsTripper{
RoundTripper: rt,
Controller: controllerName,
Remote: remote,
}
}
}
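// Illustrative usage (a sketch; the surrounding remote-cluster plumbing is assumed, not part
// of this file): code building a client for a remote cluster would copy the rest config and
// wrap it with remote=true before constructing the client, for example:
//
//   cfg := rest.CopyConfig(remoteCfg)
//   AddControllerMetricsTransportWrapper(cfg, controllerName, true)
//   remoteClient, err := client.New(cfg, client.Options{})
//
// This mirrors what NewClientWithMetricsOrDie does above for the local cluster (remote=false).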
// ControllerMetricsTripper is a RoundTripper implementation which tracks our metrics for client requests.
type ControllerMetricsTripper struct {
http.RoundTripper
Controller hivev1.ControllerName
Remote bool
}
// RoundTrip implements the http RoundTripper interface.
func (cmt *ControllerMetricsTripper) RoundTrip(req *http.Request) (*http.Response, error) {
startTime := time.Now()
remoteStr := "false"
if cmt.Remote {
remoteStr = "true"
}
path, pathErr := parsePath(req.URL.Path)
// Call the nested RoundTripper.
resp, err := cmt.RoundTripper.RoundTrip(req)
applyTime := metav1.Now().Sub(startTime).Seconds()
if err == nil && pathErr == nil {
metricKubeClientRequests.WithLabelValues(cmt.Controller.String(), req.Method, path, remoteStr, resp.Status).Inc()
metricKubeClientRequestSeconds.WithLabelValues(cmt.Controller.String(), req.Method, path, remoteStr, resp.Status).Observe(applyTime)
}
return resp, err
}
// parsePath returns a group/version/resource string from the given path. It is used to avoid per-cluster
// metrics for cardinality reasons. We do not return metrics for all paths, however, and will return an error
// if we're unable to parse a resource from the path.
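// Illustrative examples (derived from the parsing logic below, not exhaustive):
//
//   /api/v1/namespaces/foo/pods                   -> core/v1/pods
//   /apis/hive.openshift.io/v1/clusterdeployments -> hive.openshift.io/v1/clusterdeployments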
func parsePath(path string) (string, error) {
tokens := strings.Split(path[1:], "/")
if tokens[0] == "api" {
// Handle core resources:
if len(tokens) == 3 || len(tokens) == 4 {
return strings.Join([]string{"core", tokens[1], tokens[2]}, "/"), nil
}
// Handle operators on direct namespaced resources:
if len(tokens) > 4 && tokens[2] == "namespaces" {
return strings.Join([]string{"core", tokens[1], tokens[4]}, "/"), nil
}
} else if tokens[0] == "apis" {
// Handle resources with apigroups:
if len(tokens) == 4 || len(tokens) == 5 {
return strings.Join([]string{tokens[1], tokens[2], tokens[3]}, "/"), nil
}
if len(tokens) > 5 && tokens[3] == "namespaces" {
return strings.Join([]string{tokens[1], tokens[2], tokens[5]}, "/"), nil
}
}
return "", fmt.Errorf("unable to parse path for client metrics: %s", path)
}
| 1 | 15,237 | what is the effect of changing these buckets in existing data that is available in the monitoring system? if there are any existing dashboards that use previous histogram buckets they are probably going to be wrong ot invalid? also any reason why we chose these specific values? | openshift-hive | go |
@@ -109,4 +109,13 @@
//
// var h handler
// yarpc.InjectClients(dispatcher, &h)
+//
+// Automatically Sanitizing TChannel Contexts
+//
+// Contexts created with `tchannel.ContextWithHeaders` are incompatible with yarpc clients generated from thrift.
+// Using such a context will cause a yarpc client to error on any call. Using the `sanitize-tchannel` flag will
+// generate a yarpc client such that all tchannel headers from any context supplied are removed before making a yarpc call.
+// The option can be used like so:
+//
+// thriftrw --plugin yarpc --sanitize-tchannel myservice.thrift
package thrift | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package thrift implements Thrift encoding support for YARPC.
//
// To use this package, you must install ThriftRW 1.0 or newer.
//
// go get go.uber.org/thriftrw
//
// You must also install the ThriftRW plugin for YARPC.
//
// go get go.uber.org/yarpc/encoding/thrift/thriftrw-plugin-yarpc
//
// To generate YARPC compatible code from a Thrift file, use the command,
//
// thriftrw --plugin yarpc myservice.thrift
//
// In addition to generating code for types specified in your Thrift file,
// this will generate the following packages for each service in the file: a
// client package, a server package, a test package, and an UberFx module.
//
// myservice
// |- myserviceclient
// |- myserviceserver
// |- myservicefx
// |- myservicetest
//
// The client package allows sending requests through a YARPC dispatcher.
//
// client := myserviceclient.New(dispatcher.ClientConfig("myservice"))
//
// The server package facilitates registration of service implementations with
// a YARPC dispatcher.
//
// handler := myHandler{}
// dispatcher.Register(myserviceserver.New(handler))
//
// The test package provides a gomock-compatible mock client for the service.
//
// mockCtrl := gomock.NewController(t)
// client := myservicetest.NewMockClient(mockCtrl)
// client.EXPECT().Hello(request).Return(response, nil)
//
// The Fx package provides an UberFx-compatible constructor for service
// clients. This may be used with Provide to make service clients available in
// the container.
//
// fx.Provide(myservicefx.Client("myservice"))
//
// Automatically Building Clients
//
// All clients generated by the YARPC ThriftRW plugin are compatible with
// YARPC's yarpc.InjectClients function.
//
// var handler struct{ Client keyvalueclient.Interface `service:"keyvalue"` }
//   yarpc.InjectClients(dispatcher, &handler)
//
// These clients may further be customized by providing a "thrift" tag. The
// following options may be provided on the tag using a comma-separated list.
//
// enveloped: Requests and responses will be wrapped inside a standard
// Apache Thrift envelope. This flag is needed to call existing
// Apache Thrift services with clients generated by YARPC.
// Equivalent to passing thrift.Enveloped.
// multiplexed: Requests are being sent to an Apache Thrift server which has
// multiplexing enabled. Equivalent to passing
// thrift.Multiplexed. This option has no effect if enveloped
// was not set.
//
// For example,
//
// type handler struct {
// Client keyvalueclient.Interface `service:"keyvalue" thrift:"multiplexed,enveloped"`
// }
//
// var h handler
//   yarpc.InjectClients(dispatcher, &h)
//
// Calling Existing Apache Thrift Services
//
// You can call existing Apache Thrift services with YARPC by passing in the
// thrift.Enveloped option when constructing the corresponding clients.
//
// client := myserviceclient.New(dispatcher.ClientConfig("myservice"), thrift.Enveloped)
//
// With yarpc.InjectClients, you can pass the tag `thrift:"enveloped"` to
// enable this option on automatically instantiated clients.
//
// type handler struct {
// Client myserviceclient.Interface `service:"myservice" thrift:"enveloped"`
// }
//
// var h handler
// yarpc.InjectClients(dispatcher, &h)
package thrift
| 1 | 15,137 | This won't work. For the flag to be passed to the plugin, it should be passed as part of the --plugin argument. --plugin "yarpc --sanitize-tchannel" | yarpc-yarpc-go | go |
@@ -2308,6 +2308,13 @@ int LGBM_BoosterSetLeafValue(BoosterHandle handle,
API_END();
}
+int LGBM_BoosterGetNumFeatures(BoosterHandle handle, int *out_val) {
+ API_BEGIN();
+ Booster* ref_booster = reinterpret_cast<Booster*>(handle);
+ *out_val = ref_booster->GetBoosting()->MaxFeatureIdx() + 1;
+ API_END();
+}
+
int LGBM_BoosterFeatureImportance(BoosterHandle handle,
int num_iteration,
int importance_type, | 1 | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#include <LightGBM/c_api.h>
#include <LightGBM/boosting.h>
#include <LightGBM/config.h>
#include <LightGBM/dataset.h>
#include <LightGBM/dataset_loader.h>
#include <LightGBM/metric.h>
#include <LightGBM/network.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <cstdio>
#include <functional>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
#include "application/predictor.hpp"
#include <LightGBM/utils/yamc/alternate_shared_mutex.hpp>
#include <LightGBM/utils/yamc/yamc_shared_lock.hpp>
namespace LightGBM {
inline int LGBM_APIHandleException(const std::exception& ex) {
LGBM_SetLastError(ex.what());
return -1;
}
inline int LGBM_APIHandleException(const std::string& ex) {
LGBM_SetLastError(ex.c_str());
return -1;
}
#define API_BEGIN() try {
#define API_END() } \
catch(std::exception& ex) { return LGBM_APIHandleException(ex); } \
catch(std::string& ex) { return LGBM_APIHandleException(ex); } \
catch(...) { return LGBM_APIHandleException("unknown exception"); } \
return 0;
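// Illustrative pattern (a sketch of how the macros above are used, not a new API): every
// exported C entry point in this file wraps its body as
//   int LGBM_Something(...) { API_BEGIN(); /* work that may throw */ API_END(); }
// so that a thrown C++ exception becomes a -1 return code whose message is available via
// LGBM_GetLastError().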
#define UNIQUE_LOCK(mtx) \
std::unique_lock<yamc::alternate::shared_mutex> lock(mtx);
#define SHARED_LOCK(mtx) \
yamc::shared_lock<yamc::alternate::shared_mutex> lock(&mtx);
const int PREDICTOR_TYPES = 4;
// Single row predictor to abstract away caching logic
class SingleRowPredictor {
public:
PredictFunction predict_function;
int64_t num_pred_in_one_row;
SingleRowPredictor(int predict_type, Boosting* boosting, const Config& config, int start_iter, int num_iter) {
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
}
early_stop_ = config.pred_early_stop;
early_stop_freq_ = config.pred_early_stop_freq;
early_stop_margin_ = config.pred_early_stop_margin;
iter_ = num_iter;
predictor_.reset(new Predictor(boosting, start_iter, iter_, is_raw_score, is_predict_leaf, predict_contrib,
early_stop_, early_stop_freq_, early_stop_margin_));
num_pred_in_one_row = boosting->NumPredictOneRow(start_iter, iter_, is_predict_leaf, predict_contrib);
predict_function = predictor_->GetPredictFunction();
num_total_model_ = boosting->NumberOfTotalModel();
}
~SingleRowPredictor() {}
bool IsPredictorEqual(const Config& config, int iter, Boosting* boosting) {
return early_stop_ == config.pred_early_stop &&
early_stop_freq_ == config.pred_early_stop_freq &&
early_stop_margin_ == config.pred_early_stop_margin &&
iter_ == iter &&
num_total_model_ == boosting->NumberOfTotalModel();
}
private:
std::unique_ptr<Predictor> predictor_;
bool early_stop_;
int early_stop_freq_;
double early_stop_margin_;
int iter_;
int num_total_model_;
};
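// Note (inferred from Booster::SetSingleRowPredictor below): one SingleRowPredictor is cached per
// predict type and is rebuilt only when IsPredictorEqual reports that the early-stopping settings,
// the number of iterations, or the total number of models have changed.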
class Booster {
public:
explicit Booster(const char* filename) {
boosting_.reset(Boosting::CreateBoosting("gbdt", filename));
}
Booster(const Dataset* train_data,
const char* parameters) {
auto param = Config::Str2Map(parameters);
config_.Set(param);
if (config_.num_threads > 0) {
omp_set_num_threads(config_.num_threads);
}
// create boosting
if (config_.input_model.size() > 0) {
Log::Warning("Continued train from model is not supported for c_api,\n"
"please use continued train with input score");
}
boosting_.reset(Boosting::CreateBoosting(config_.boosting, nullptr));
train_data_ = train_data;
CreateObjectiveAndMetrics();
// initialize the boosting
if (config_.tree_learner == std::string("feature")) {
Log::Fatal("Do not support feature parallel in c api");
}
if (Network::num_machines() == 1 && config_.tree_learner != std::string("serial")) {
Log::Warning("Only find one worker, will switch to serial tree learner");
config_.tree_learner = "serial";
}
boosting_->Init(&config_, train_data_, objective_fun_.get(),
Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
void MergeFrom(const Booster* other) {
UNIQUE_LOCK(mutex_)
boosting_->MergeFrom(other->boosting_.get());
}
~Booster() {
}
void CreateObjectiveAndMetrics() {
// create objective function
objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
config_));
if (objective_fun_ == nullptr) {
Log::Warning("Using self-defined objective function");
}
// initialize the objective function
if (objective_fun_ != nullptr) {
objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
}
// create training metric
train_metric_.clear();
for (auto metric_type : config_.metric) {
auto metric = std::unique_ptr<Metric>(
Metric::CreateMetric(metric_type, config_));
if (metric == nullptr) { continue; }
metric->Init(train_data_->metadata(), train_data_->num_data());
train_metric_.push_back(std::move(metric));
}
train_metric_.shrink_to_fit();
}
void ResetTrainingData(const Dataset* train_data) {
if (train_data != train_data_) {
UNIQUE_LOCK(mutex_)
train_data_ = train_data;
CreateObjectiveAndMetrics();
// reset the boosting
boosting_->ResetTrainingData(train_data_,
objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
}
static void CheckDatasetResetConfig(
const Config& old_config,
const std::unordered_map<std::string, std::string>& new_param) {
Config new_config;
new_config.Set(new_param);
if (new_param.count("data_random_seed") &&
new_config.data_random_seed != old_config.data_random_seed) {
Log::Fatal("Cannot change data_random_seed after constructed Dataset handle.");
}
if (new_param.count("max_bin") &&
new_config.max_bin != old_config.max_bin) {
Log::Fatal("Cannot change max_bin after constructed Dataset handle.");
}
if (new_param.count("max_bin_by_feature") &&
new_config.max_bin_by_feature != old_config.max_bin_by_feature) {
Log::Fatal(
"Cannot change max_bin_by_feature after constructed Dataset handle.");
}
if (new_param.count("bin_construct_sample_cnt") &&
new_config.bin_construct_sample_cnt !=
old_config.bin_construct_sample_cnt) {
Log::Fatal(
"Cannot change bin_construct_sample_cnt after constructed Dataset "
"handle.");
}
if (new_param.count("min_data_in_bin") &&
new_config.min_data_in_bin != old_config.min_data_in_bin) {
Log::Fatal(
"Cannot change min_data_in_bin after constructed Dataset handle.");
}
if (new_param.count("use_missing") &&
new_config.use_missing != old_config.use_missing) {
Log::Fatal("Cannot change use_missing after constructed Dataset handle.");
}
if (new_param.count("zero_as_missing") &&
new_config.zero_as_missing != old_config.zero_as_missing) {
Log::Fatal(
"Cannot change zero_as_missing after constructed Dataset handle.");
}
if (new_param.count("categorical_feature") &&
new_config.categorical_feature != old_config.categorical_feature) {
Log::Fatal(
"Cannot change categorical_feature after constructed Dataset "
"handle.");
}
if (new_param.count("feature_pre_filter") &&
new_config.feature_pre_filter != old_config.feature_pre_filter) {
Log::Fatal(
"Cannot change feature_pre_filter after constructed Dataset handle.");
}
if (new_param.count("is_enable_sparse") &&
new_config.is_enable_sparse != old_config.is_enable_sparse) {
Log::Fatal(
"Cannot change is_enable_sparse after constructed Dataset handle.");
}
if (new_param.count("pre_partition") &&
new_config.pre_partition != old_config.pre_partition) {
Log::Fatal(
"Cannot change pre_partition after constructed Dataset handle.");
}
if (new_param.count("enable_bundle") &&
new_config.enable_bundle != old_config.enable_bundle) {
Log::Fatal(
"Cannot change enable_bundle after constructed Dataset handle.");
}
if (new_param.count("header") && new_config.header != old_config.header) {
Log::Fatal("Cannot change header after constructed Dataset handle.");
}
if (new_param.count("two_round") &&
new_config.two_round != old_config.two_round) {
Log::Fatal("Cannot change two_round after constructed Dataset handle.");
}
if (new_param.count("label_column") &&
new_config.label_column != old_config.label_column) {
Log::Fatal(
"Cannot change label_column after constructed Dataset handle.");
}
if (new_param.count("weight_column") &&
new_config.weight_column != old_config.weight_column) {
Log::Fatal(
"Cannot change weight_column after constructed Dataset handle.");
}
if (new_param.count("group_column") &&
new_config.group_column != old_config.group_column) {
Log::Fatal(
"Cannot change group_column after constructed Dataset handle.");
}
if (new_param.count("ignore_column") &&
new_config.ignore_column != old_config.ignore_column) {
Log::Fatal(
"Cannot change ignore_column after constructed Dataset handle.");
}
if (new_param.count("forcedbins_filename")) {
Log::Fatal("Cannot change forced bins after constructed Dataset handle.");
}
if (new_param.count("min_data_in_leaf") &&
new_config.min_data_in_leaf < old_config.min_data_in_leaf &&
old_config.feature_pre_filter) {
Log::Fatal(
"Reducing `min_data_in_leaf` with `feature_pre_filter=true` may "
"cause unexpected behaviour "
"for features that were pre-filtered by the larger "
"`min_data_in_leaf`.\n"
"You need to set `feature_pre_filter=false` to dynamically change "
"the `min_data_in_leaf`.");
}
if (new_param.count("linear_tree") && new_config.linear_tree != old_config.linear_tree) {
Log::Fatal("Cannot change linear_tree after constructed Dataset handle.");
}
if (new_param.count("precise_float_parser") &&
new_config.precise_float_parser != old_config.precise_float_parser) {
Log::Fatal("Cannot change precise_float_parser after constructed Dataset handle.");
}
}
void ResetConfig(const char* parameters) {
UNIQUE_LOCK(mutex_)
auto param = Config::Str2Map(parameters);
Config new_config;
new_config.Set(param);
if (param.count("num_class") && new_config.num_class != config_.num_class) {
Log::Fatal("Cannot change num_class during training");
}
if (param.count("boosting") && new_config.boosting != config_.boosting) {
Log::Fatal("Cannot change boosting during training");
}
if (param.count("metric") && new_config.metric != config_.metric) {
Log::Fatal("Cannot change metric during training");
}
CheckDatasetResetConfig(config_, param);
config_.Set(param);
if (config_.num_threads > 0) {
omp_set_num_threads(config_.num_threads);
}
if (param.count("objective")) {
// create objective function
objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
config_));
if (objective_fun_ == nullptr) {
Log::Warning("Using self-defined objective function");
}
// initialize the objective function
if (objective_fun_ != nullptr) {
objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
}
boosting_->ResetTrainingData(train_data_,
objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
boosting_->ResetConfig(&config_);
}
void AddValidData(const Dataset* valid_data) {
UNIQUE_LOCK(mutex_)
valid_metrics_.emplace_back();
for (auto metric_type : config_.metric) {
auto metric = std::unique_ptr<Metric>(Metric::CreateMetric(metric_type, config_));
if (metric == nullptr) { continue; }
metric->Init(valid_data->metadata(), valid_data->num_data());
valid_metrics_.back().push_back(std::move(metric));
}
valid_metrics_.back().shrink_to_fit();
boosting_->AddValidDataset(valid_data,
Common::ConstPtrInVectorWrapper<Metric>(valid_metrics_.back()));
}
bool TrainOneIter() {
UNIQUE_LOCK(mutex_)
return boosting_->TrainOneIter(nullptr, nullptr);
}
void Refit(const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
UNIQUE_LOCK(mutex_)
std::vector<std::vector<int32_t>> v_leaf_preds(nrow, std::vector<int32_t>(ncol, 0));
for (int i = 0; i < nrow; ++i) {
for (int j = 0; j < ncol; ++j) {
v_leaf_preds[i][j] = leaf_preds[static_cast<size_t>(i) * static_cast<size_t>(ncol) + static_cast<size_t>(j)];
}
}
boosting_->RefitTree(v_leaf_preds);
}
bool TrainOneIter(const score_t* gradients, const score_t* hessians) {
UNIQUE_LOCK(mutex_)
return boosting_->TrainOneIter(gradients, hessians);
}
void RollbackOneIter() {
UNIQUE_LOCK(mutex_)
boosting_->RollbackOneIter();
}
void SetSingleRowPredictor(int start_iteration, int num_iteration, int predict_type, const Config& config) {
UNIQUE_LOCK(mutex_)
if (single_row_predictor_[predict_type].get() == nullptr ||
!single_row_predictor_[predict_type]->IsPredictorEqual(config, num_iteration, boosting_.get())) {
single_row_predictor_[predict_type].reset(new SingleRowPredictor(predict_type, boosting_.get(),
config, start_iteration, num_iteration));
}
}
void PredictSingleRow(int predict_type, int ncol,
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
const Config& config,
double* out_result, int64_t* out_len) const {
if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n"\
"You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
}
UNIQUE_LOCK(mutex_)
const auto& single_row_predictor = single_row_predictor_[predict_type];
auto one_row = get_row_fun(0);
auto pred_wrt_ptr = out_result;
single_row_predictor->predict_function(one_row, pred_wrt_ptr);
*out_len = single_row_predictor->num_pred_in_one_row;
}
Predictor CreatePredictor(int start_iteration, int num_iteration, int predict_type, int ncol, const Config& config) const {
if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n" \
"You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
}
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
return Predictor(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
}
void Predict(int start_iteration, int num_iteration, int predict_type, int nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
const Config& config,
double* out_result, int64_t* out_len) const {
SHARED_LOCK(mutex_);
auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
bool is_predict_leaf = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
}
int64_t num_pred_in_one_row = boosting_->NumPredictOneRow(start_iteration, num_iteration, is_predict_leaf, predict_contrib);
auto pred_fun = predictor.GetPredictFunction();
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
auto one_row = get_row_fun(i);
auto pred_wrt_ptr = out_result + static_cast<size_t>(num_pred_in_one_row) * i;
pred_fun(one_row, pred_wrt_ptr);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
*out_len = num_pred_in_one_row * nrow;
}
void PredictSparse(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
const Config& config, int64_t* out_elements_size,
std::vector<std::vector<std::unordered_map<int, double>>>* agg_ptr,
int32_t** out_indices, void** out_data, int data_type,
bool* is_data_float32_ptr, int num_matrices) const {
auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
auto pred_sparse_fun = predictor.GetPredictSparseFunction();
std::vector<std::vector<std::unordered_map<int, double>>>& agg = *agg_ptr;
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int64_t i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
auto one_row = get_row_fun(i);
agg[i] = std::vector<std::unordered_map<int, double>>(num_matrices);
pred_sparse_fun(one_row, &agg[i]);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
// calculate the nonzero data and indices size
int64_t elements_size = 0;
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
for (int j = 0; j < static_cast<int>(row_vector.size()); ++j) {
elements_size += static_cast<int64_t>(row_vector[j].size());
}
}
*out_elements_size = elements_size;
*is_data_float32_ptr = false;
// allocate data and indices arrays
if (data_type == C_API_DTYPE_FLOAT32) {
*out_data = new float[elements_size];
*is_data_float32_ptr = true;
} else if (data_type == C_API_DTYPE_FLOAT64) {
*out_data = new double[elements_size];
} else {
Log::Fatal("Unknown data type in PredictSparse");
return;
}
*out_indices = new int32_t[elements_size];
}
void PredictSparseCSR(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
const Config& config,
int64_t* out_len, void** out_indptr, int indptr_type,
int32_t** out_indices, void** out_data, int data_type) const {
SHARED_LOCK(mutex_);
// Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
int num_matrices = boosting_->NumModelPerIteration();
bool is_indptr_int32 = false;
bool is_data_float32 = false;
int64_t indptr_size = (nrow + 1) * num_matrices;
if (indptr_type == C_API_DTYPE_INT32) {
*out_indptr = new int32_t[indptr_size];
is_indptr_int32 = true;
} else if (indptr_type == C_API_DTYPE_INT64) {
*out_indptr = new int64_t[indptr_size];
} else {
Log::Fatal("Unknown indptr type in PredictSparseCSR");
return;
}
// aggregated per row feature contribution results
std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
int64_t elements_size = 0;
PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
out_indices, out_data, data_type, &is_data_float32, num_matrices);
std::vector<int> row_sizes(num_matrices * nrow);
std::vector<int64_t> row_matrix_offsets(num_matrices * nrow);
std::vector<int64_t> matrix_offsets(num_matrices);
int64_t row_vector_cnt = 0;
for (int m = 0; m < num_matrices; ++m) {
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
auto row_vector_size = row_vector[m].size();
// keep track of the row_vector sizes for parallelization
row_sizes[row_vector_cnt] = static_cast<int>(row_vector_size);
if (i == 0) {
row_matrix_offsets[row_vector_cnt] = 0;
} else {
row_matrix_offsets[row_vector_cnt] = static_cast<int64_t>(row_sizes[row_vector_cnt - 1] + row_matrix_offsets[row_vector_cnt - 1]);
}
row_vector_cnt++;
}
if (m == 0) {
matrix_offsets[m] = 0;
}
if (m + 1 < num_matrices) {
matrix_offsets[m + 1] = static_cast<int64_t>(matrix_offsets[m] + row_matrix_offsets[row_vector_cnt - 1] + row_sizes[row_vector_cnt - 1]);
}
}
// copy vector results to output for each row
int64_t indptr_index = 0;
for (int m = 0; m < num_matrices; ++m) {
if (is_indptr_int32) {
(reinterpret_cast<int32_t*>(*out_indptr))[indptr_index] = 0;
} else {
(reinterpret_cast<int64_t*>(*out_indptr))[indptr_index] = 0;
}
indptr_index++;
int64_t matrix_start_index = m * static_cast<int64_t>(agg.size());
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
OMP_LOOP_EX_BEGIN();
auto row_vector = agg[i];
int64_t row_start_index = matrix_start_index + i;
int64_t element_index = row_matrix_offsets[row_start_index] + matrix_offsets[m];
int64_t indptr_loop_index = indptr_index + i;
for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
(*out_indices)[element_index] = it->first;
if (is_data_float32) {
(reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
} else {
(reinterpret_cast<double*>(*out_data))[element_index] = it->second;
}
element_index++;
}
int64_t indptr_value = row_matrix_offsets[row_start_index] + row_sizes[row_start_index];
if (is_indptr_int32) {
(reinterpret_cast<int32_t*>(*out_indptr))[indptr_loop_index] = static_cast<int32_t>(indptr_value);
} else {
(reinterpret_cast<int64_t*>(*out_indptr))[indptr_loop_index] = indptr_value;
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
indptr_index += static_cast<int64_t>(agg.size());
}
out_len[0] = elements_size;
out_len[1] = indptr_size;
}
void PredictSparseCSC(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
const Config& config,
int64_t* out_len, void** out_col_ptr, int col_ptr_type,
int32_t** out_indices, void** out_data, int data_type) const {
SHARED_LOCK(mutex_);
// Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
int num_matrices = boosting_->NumModelPerIteration();
auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
auto pred_sparse_fun = predictor.GetPredictSparseFunction();
bool is_col_ptr_int32 = false;
bool is_data_float32 = false;
int num_output_cols = ncol + 1;
int col_ptr_size = (num_output_cols + 1) * num_matrices;
if (col_ptr_type == C_API_DTYPE_INT32) {
*out_col_ptr = new int32_t[col_ptr_size];
is_col_ptr_int32 = true;
} else if (col_ptr_type == C_API_DTYPE_INT64) {
*out_col_ptr = new int64_t[col_ptr_size];
} else {
Log::Fatal("Unknown col_ptr type in PredictSparseCSC");
return;
}
// aggregated per row feature contribution results
std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
int64_t elements_size = 0;
PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
out_indices, out_data, data_type, &is_data_float32, num_matrices);
// calculate number of elements per column to construct
// the CSC matrix with random access
std::vector<std::vector<int64_t>> column_sizes(num_matrices);
for (int m = 0; m < num_matrices; ++m) {
column_sizes[m] = std::vector<int64_t>(num_output_cols, 0);
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
column_sizes[m][it->first] += 1;
}
}
}
// keep track of column counts
std::vector<std::vector<int64_t>> column_counts(num_matrices);
// keep track of beginning index for each column
std::vector<std::vector<int64_t>> column_start_indices(num_matrices);
// keep track of beginning index for each matrix
std::vector<int64_t> matrix_start_indices(num_matrices, 0);
int col_ptr_index = 0;
for (int m = 0; m < num_matrices; ++m) {
int64_t col_ptr_value = 0;
column_start_indices[m] = std::vector<int64_t>(num_output_cols, 0);
column_counts[m] = std::vector<int64_t>(num_output_cols, 0);
if (is_col_ptr_int32) {
(reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(col_ptr_value);
} else {
(reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = col_ptr_value;
}
col_ptr_index++;
for (int64_t i = 1; i < static_cast<int64_t>(column_sizes[m].size()); ++i) {
column_start_indices[m][i] = column_sizes[m][i - 1] + column_start_indices[m][i - 1];
if (is_col_ptr_int32) {
(reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(column_start_indices[m][i]);
} else {
(reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = column_start_indices[m][i];
}
col_ptr_index++;
}
int64_t last_elem_index = static_cast<int64_t>(column_sizes[m].size()) - 1;
int64_t last_column_start_index = column_start_indices[m][last_elem_index];
int64_t last_column_size = column_sizes[m][last_elem_index];
if (is_col_ptr_int32) {
(reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(last_column_start_index + last_column_size);
} else {
(reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = last_column_start_index + last_column_size;
}
if (m + 1 < num_matrices) {
matrix_start_indices[m + 1] = matrix_start_indices[m] + last_column_start_index + last_column_size;
}
col_ptr_index++;
}
// Note: we parallelize across matrices instead of rows because of the column_counts[m][col_idx] increment inside the loop
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int m = 0; m < num_matrices; ++m) {
OMP_LOOP_EX_BEGIN();
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
int64_t col_idx = it->first;
int64_t element_index = column_start_indices[m][col_idx] +
matrix_start_indices[m] +
column_counts[m][col_idx];
// store the row index
(*out_indices)[element_index] = static_cast<int32_t>(i);
// update column count
column_counts[m][col_idx]++;
if (is_data_float32) {
(reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
} else {
(reinterpret_cast<double*>(*out_data))[element_index] = it->second;
}
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
out_len[0] = elements_size;
out_len[1] = col_ptr_size;
}
void Predict(int start_iteration, int num_iteration, int predict_type, const char* data_filename,
int data_has_header, const Config& config,
const char* result_filename) const {
SHARED_LOCK(mutex_)
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
Predictor predictor(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
bool bool_data_has_header = data_has_header > 0 ? true : false;
predictor.Predict(data_filename, result_filename, bool_data_has_header, config.predict_disable_shape_check,
config.precise_float_parser);
}
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) const {
boosting_->GetPredictAt(data_idx, out_result, out_len);
}
void SaveModelToFile(int start_iteration, int num_iteration, int feature_importance_type, const char* filename) const {
boosting_->SaveModelToFile(start_iteration, num_iteration, feature_importance_type, filename);
}
void LoadModelFromString(const char* model_str) {
size_t len = std::strlen(model_str);
boosting_->LoadModelFromString(model_str, len);
}
std::string SaveModelToString(int start_iteration, int num_iteration,
int feature_importance_type) const {
return boosting_->SaveModelToString(start_iteration,
num_iteration, feature_importance_type);
}
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const {
return boosting_->DumpModel(start_iteration, num_iteration,
feature_importance_type);
}
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const {
return boosting_->FeatureImportance(num_iteration, importance_type);
}
double UpperBoundValue() const {
SHARED_LOCK(mutex_)
return boosting_->GetUpperBoundValue();
}
double LowerBoundValue() const {
SHARED_LOCK(mutex_)
return boosting_->GetLowerBoundValue();
}
double GetLeafValue(int tree_idx, int leaf_idx) const {
SHARED_LOCK(mutex_)
return dynamic_cast<GBDTBase*>(boosting_.get())->GetLeafValue(tree_idx, leaf_idx);
}
void SetLeafValue(int tree_idx, int leaf_idx, double val) {
UNIQUE_LOCK(mutex_)
dynamic_cast<GBDTBase*>(boosting_.get())->SetLeafValue(tree_idx, leaf_idx, val);
}
void ShuffleModels(int start_iter, int end_iter) {
UNIQUE_LOCK(mutex_)
boosting_->ShuffleModels(start_iter, end_iter);
}
int GetEvalCounts() const {
SHARED_LOCK(mutex_)
int ret = 0;
for (const auto& metric : train_metric_) {
ret += static_cast<int>(metric->GetName().size());
}
return ret;
}
int GetEvalNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
SHARED_LOCK(mutex_)
*out_buffer_len = 0;
int idx = 0;
for (const auto& metric : train_metric_) {
for (const auto& name : metric->GetName()) {
if (idx < len) {
std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
out_strs[idx][buffer_len - 1] = '\0';
}
*out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
++idx;
}
}
return idx;
}
int GetFeatureNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
SHARED_LOCK(mutex_)
*out_buffer_len = 0;
int idx = 0;
for (const auto& name : boosting_->FeatureNames()) {
if (idx < len) {
std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
out_strs[idx][buffer_len - 1] = '\0';
}
*out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
++idx;
}
return idx;
}
const Boosting* GetBoosting() const { return boosting_.get(); }
private:
const Dataset* train_data_;
std::unique_ptr<Boosting> boosting_;
std::unique_ptr<SingleRowPredictor> single_row_predictor_[PREDICTOR_TYPES];
/*! \brief All configs */
Config config_;
/*! \brief Metric for training data */
std::vector<std::unique_ptr<Metric>> train_metric_;
/*! \brief Metrics for validation data */
std::vector<std::vector<std::unique_ptr<Metric>>> valid_metrics_;
/*! \brief Training objective function */
std::unique_ptr<ObjectiveFunction> objective_fun_;
/*! \brief mutex for threading safe call */
mutable yamc::alternate::shared_mutex mutex_;
};
} // namespace LightGBM
// explicitly declare symbols from LightGBM namespace
using LightGBM::AllgatherFunction;
using LightGBM::Booster;
using LightGBM::Common::CheckElementsIntervalClosed;
using LightGBM::Common::RemoveQuotationSymbol;
using LightGBM::Common::Vector2Ptr;
using LightGBM::Common::VectorSize;
using LightGBM::Config;
using LightGBM::data_size_t;
using LightGBM::Dataset;
using LightGBM::DatasetLoader;
using LightGBM::kZeroThreshold;
using LightGBM::LGBM_APIHandleException;
using LightGBM::Log;
using LightGBM::Network;
using LightGBM::Random;
using LightGBM::ReduceScatterFunction;
// some help functions used to convert data
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type);
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices,
const void* data, int data_type, int64_t nindptr, int64_t nelem);
// Row iterator of on column for CSC matrix
class CSC_RowIterator {
public:
CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx);
~CSC_RowIterator() {}
// return value at idx, only can access by ascent order
double Get(int idx);
// return next non-zero pair, if index < 0, means no more data
std::pair<int, double> NextNonZero();
private:
int nonzero_idx_ = 0;
int cur_idx_ = -1;
double cur_val_ = 0.0f;
bool is_end_ = false;
std::function<std::pair<int, double>(int idx)> iter_fun_;
};
// start of c_api functions
const char* LGBM_GetLastError() {
return LastErrorMsg();
}
int LGBM_RegisterLogCallback(void (*callback)(const char*)) {
API_BEGIN();
Log::ResetCallBack(callback);
API_END();
}
static inline int SampleCount(int32_t total_nrow, const Config& config) {
return static_cast<int>(total_nrow < config.bin_construct_sample_cnt ? total_nrow : config.bin_construct_sample_cnt);
}
static inline std::vector<int32_t> CreateSampleIndices(int32_t total_nrow, const Config& config) {
Random rand(config.data_random_seed);
int sample_cnt = SampleCount(total_nrow, config);
return rand.Sample(total_nrow, sample_cnt);
}
int LGBM_GetSampleCount(int32_t num_total_row,
const char* parameters,
int* out) {
API_BEGIN();
if (out == nullptr) {
Log::Fatal("LGBM_GetSampleCount output is nullptr");
}
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
*out = SampleCount(num_total_row, config);
API_END();
}
int LGBM_SampleIndices(int32_t num_total_row,
const char* parameters,
void* out,
int32_t* out_len) {
// This API is to keep python binding's behavior the same with C++ implementation.
// Sample count, random seed etc. should be provided in parameters.
API_BEGIN();
if (out == nullptr) {
Log::Fatal("LGBM_SampleIndices output is nullptr");
}
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
auto sample_indices = CreateSampleIndices(num_total_row, config);
memcpy(out, sample_indices.data(), sizeof(int32_t) * sample_indices.size());
*out_len = static_cast<int32_t>(sample_indices.size());
API_END();
}
int LGBM_DatasetCreateFromFile(const char* filename,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
DatasetLoader loader(config, nullptr, 1, filename);
if (reference == nullptr) {
if (Network::num_machines() == 1) {
*out = loader.LoadFromFile(filename);
} else {
*out = loader.LoadFromFile(filename, Network::rank(), Network::num_machines());
}
} else {
*out = loader.LoadFromFileAlignWithOtherDataset(filename,
reinterpret_cast<const Dataset*>(reference));
}
API_END();
}
int LGBM_DatasetCreateFromSampledColumn(double** sample_data,
int** sample_indices,
int32_t ncol,
const int* num_per_col,
int32_t num_sample_row,
int32_t num_total_row,
const char* parameters,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
DatasetLoader loader(config, nullptr, 1, nullptr);
*out = loader.ConstructFromSampleData(sample_data, sample_indices, ncol, num_per_col,
num_sample_row,
static_cast<data_size_t>(num_total_row));
API_END();
}
int LGBM_DatasetCreateByReference(const DatasetHandle reference,
int64_t num_total_row,
DatasetHandle* out) {
API_BEGIN();
std::unique_ptr<Dataset> ret;
ret.reset(new Dataset(static_cast<data_size_t>(num_total_row)));
ret->CreateValid(reinterpret_cast<const Dataset*>(reference));
*out = ret.release();
API_END();
}
int LGBM_DatasetPushRows(DatasetHandle dataset,
const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int32_t start_row) {
API_BEGIN();
auto p_dataset = reinterpret_cast<Dataset*>(dataset);
auto get_row_fun = RowFunctionFromDenseMatric(data, nrow, ncol, data_type, 1);
if (p_dataset->has_raw()) {
p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
p_dataset->PushOneRow(tid, start_row + i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
if (start_row + nrow == p_dataset->num_data()) {
p_dataset->FinishLoad();
}
API_END();
}
int LGBM_DatasetPushRowsByCSR(DatasetHandle dataset,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t,
int64_t start_row) {
API_BEGIN();
auto p_dataset = reinterpret_cast<Dataset*>(dataset);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int32_t nrow = static_cast<int32_t>(nindptr - 1);
if (p_dataset->has_raw()) {
p_dataset->ResizeRaw(p_dataset->num_numeric_features() + nrow);
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
p_dataset->PushOneRow(tid, static_cast<data_size_t>(start_row + i), one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
if (start_row + nrow == static_cast<int64_t>(p_dataset->num_data())) {
p_dataset->FinishLoad();
}
API_END();
}
int LGBM_DatasetCreateFromMat(const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int is_row_major,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
return LGBM_DatasetCreateFromMats(1,
&data,
data_type,
&nrow,
ncol,
is_row_major,
parameters,
reference,
out);
}
int LGBM_DatasetCreateFromMats(int32_t nmat,
const void** data,
int data_type,
int32_t* nrow,
int32_t ncol,
int is_row_major,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t total_nrow = 0;
for (int j = 0; j < nmat; ++j) {
total_nrow += nrow[j];
}
std::vector<std::function<std::vector<double>(int row_idx)>> get_row_fun;
for (int j = 0; j < nmat; ++j) {
get_row_fun.push_back(RowFunctionFromDenseMatric(data[j], nrow[j], ncol, data_type, is_row_major));
}
if (reference == nullptr) {
// sample data first
auto sample_indices = CreateSampleIndices(total_nrow, config);
int sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(ncol);
std::vector<std::vector<int>> sample_idx(ncol);
int offset = 0;
int j = 0;
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
while ((idx - offset) >= nrow[j]) {
offset += nrow[j];
++j;
}
auto row = get_row_fun[j](static_cast<int>(idx - offset));
for (size_t k = 0; k < row.size(); ++k) {
if (std::fabs(row[k]) > kZeroThreshold || std::isnan(row[k])) {
sample_values[k].emplace_back(row[k]);
sample_idx[k].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
ncol,
VectorSize<double>(sample_values).data(),
sample_cnt, total_nrow));
} else {
ret.reset(new Dataset(total_nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
if (ret->has_raw()) {
ret->ResizeRaw(total_nrow);
}
}
int32_t start_row = 0;
for (int j = 0; j < nmat; ++j) {
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow[j]; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun[j](i);
ret->PushOneRow(tid, start_row + i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
start_row += nrow[j];
}
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetCreateFromCSR(const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int32_t nrow = static_cast<int32_t>(nindptr - 1);
if (reference == nullptr) {
// sample data first
auto sample_indices = CreateSampleIndices(nrow, config);
int sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(num_col);
std::vector<std::vector<int>> sample_idx(num_col);
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
auto row = get_row_fun(static_cast<int>(idx));
for (std::pair<int, double>& inner_data : row) {
CHECK_LT(inner_data.first, num_col);
if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
sample_values[inner_data.first].emplace_back(inner_data.second);
sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(num_col),
VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
if (ret->has_raw()) {
ret->ResizeRaw(nrow);
}
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nindptr - 1; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
ret->PushOneRow(tid, i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetCreateFromCSRFunc(void* get_row_funptr,
int num_rows,
int64_t num_col,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto get_row_fun = *static_cast<std::function<void(int idx, std::vector<std::pair<int, double>>&)>*>(get_row_funptr);
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t nrow = num_rows;
if (reference == nullptr) {
// sample data first
auto sample_indices = CreateSampleIndices(nrow, config);
int sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(num_col);
std::vector<std::vector<int>> sample_idx(num_col);
// local buffer to re-use memory
std::vector<std::pair<int, double>> buffer;
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
get_row_fun(static_cast<int>(idx), buffer);
for (std::pair<int, double>& inner_data : buffer) {
CHECK_LT(inner_data.first, num_col);
if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
sample_values[inner_data.first].emplace_back(inner_data.second);
sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(num_col),
VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
if (ret->has_raw()) {
ret->ResizeRaw(nrow);
}
}
OMP_INIT_EX();
std::vector<std::pair<int, double>> thread_buffer;
#pragma omp parallel for schedule(static) private(thread_buffer)
for (int i = 0; i < num_rows; ++i) {
OMP_LOOP_EX_BEGIN();
{
const int tid = omp_get_thread_num();
get_row_fun(i, thread_buffer);
ret->PushOneRow(tid, i, thread_buffer);
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetCreateFromCSC(const void* col_ptr,
int col_ptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t ncol_ptr,
int64_t nelem,
int64_t num_row,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t nrow = static_cast<int32_t>(num_row);
if (reference == nullptr) {
// sample data first
auto sample_indices = CreateSampleIndices(nrow, config);
int sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(ncol_ptr - 1);
std::vector<std::vector<int>> sample_idx(ncol_ptr - 1);
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) {
OMP_LOOP_EX_BEGIN();
CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
for (int j = 0; j < sample_cnt; j++) {
auto val = col_it.Get(sample_indices[j]);
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
sample_values[i].emplace_back(val);
sample_idx[i].emplace_back(j);
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(sample_values.size()),
VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < ncol_ptr - 1; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
int feature_idx = ret->InnerFeatureIndex(i);
if (feature_idx < 0) { continue; }
int group = ret->Feature2Group(feature_idx);
int sub_feature = ret->Feture2SubFeature(feature_idx);
CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
auto bin_mapper = ret->FeatureBinMapper(feature_idx);
if (bin_mapper->GetDefaultBin() == bin_mapper->GetMostFreqBin()) {
int row_idx = 0;
while (row_idx < nrow) {
auto pair = col_it.NextNonZero();
row_idx = pair.first;
// no more data
if (row_idx < 0) { break; }
ret->PushOneData(tid, row_idx, group, feature_idx, sub_feature, pair.second);
}
} else {
for (int row_idx = 0; row_idx < nrow; ++row_idx) {
auto val = col_it.Get(row_idx);
ret->PushOneData(tid, row_idx, group, feature_idx, sub_feature, val);
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetGetSubset(
const DatasetHandle handle,
const int32_t* used_row_indices,
int32_t num_used_row_indices,
const char* parameters,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
auto full_dataset = reinterpret_cast<const Dataset*>(handle);
CHECK_GT(num_used_row_indices, 0);
const int32_t lower = 0;
const int32_t upper = full_dataset->num_data() - 1;
CheckElementsIntervalClosed(used_row_indices, lower, upper, num_used_row_indices, "Used indices of subset");
if (!std::is_sorted(used_row_indices, used_row_indices + num_used_row_indices)) {
Log::Fatal("used_row_indices should be sorted in Subset");
}
auto ret = std::unique_ptr<Dataset>(new Dataset(num_used_row_indices));
ret->CopyFeatureMapperFrom(full_dataset);
ret->CopySubrow(full_dataset, used_row_indices, num_used_row_indices, true);
*out = ret.release();
API_END();
}
int LGBM_DatasetSetFeatureNames(
DatasetHandle handle,
const char** feature_names,
int num_feature_names) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
std::vector<std::string> feature_names_str;
for (int i = 0; i < num_feature_names; ++i) {
feature_names_str.emplace_back(feature_names[i]);
}
dataset->set_feature_names(feature_names_str);
API_END();
}
int LGBM_DatasetGetFeatureNames(
DatasetHandle handle,
const int len,
int* num_feature_names,
const size_t buffer_len,
size_t* out_buffer_len,
char** feature_names) {
API_BEGIN();
*out_buffer_len = 0;
auto dataset = reinterpret_cast<Dataset*>(handle);
auto inside_feature_name = dataset->feature_names();
*num_feature_names = static_cast<int>(inside_feature_name.size());
for (int i = 0; i < *num_feature_names; ++i) {
if (i < len) {
std::memcpy(feature_names[i], inside_feature_name[i].c_str(), std::min(inside_feature_name[i].size() + 1, buffer_len));
feature_names[i][buffer_len - 1] = '\0';
}
*out_buffer_len = std::max(inside_feature_name[i].size() + 1, *out_buffer_len);
}
API_END();
}
#ifdef _MSC_VER
#pragma warning(disable : 4702)
#endif
int LGBM_DatasetFree(DatasetHandle handle) {
API_BEGIN();
delete reinterpret_cast<Dataset*>(handle);
API_END();
}
int LGBM_DatasetSaveBinary(DatasetHandle handle,
const char* filename) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
dataset->SaveBinaryFile(filename);
API_END();
}
int LGBM_DatasetDumpText(DatasetHandle handle,
const char* filename) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
dataset->DumpTextFile(filename);
API_END();
}
int LGBM_DatasetSetField(DatasetHandle handle,
const char* field_name,
const void* field_data,
int num_element,
int type) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
bool is_success = false;
if (type == C_API_DTYPE_FLOAT32) {
is_success = dataset->SetFloatField(field_name, reinterpret_cast<const float*>(field_data), static_cast<int32_t>(num_element));
} else if (type == C_API_DTYPE_INT32) {
is_success = dataset->SetIntField(field_name, reinterpret_cast<const int*>(field_data), static_cast<int32_t>(num_element));
} else if (type == C_API_DTYPE_FLOAT64) {
is_success = dataset->SetDoubleField(field_name, reinterpret_cast<const double*>(field_data), static_cast<int32_t>(num_element));
}
if (!is_success) { Log::Fatal("Input data type error or field not found"); }
API_END();
}
int LGBM_DatasetGetField(DatasetHandle handle,
const char* field_name,
int* out_len,
const void** out_ptr,
int* out_type) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
bool is_success = false;
if (dataset->GetFloatField(field_name, out_len, reinterpret_cast<const float**>(out_ptr))) {
*out_type = C_API_DTYPE_FLOAT32;
is_success = true;
} else if (dataset->GetIntField(field_name, out_len, reinterpret_cast<const int**>(out_ptr))) {
*out_type = C_API_DTYPE_INT32;
is_success = true;
} else if (dataset->GetDoubleField(field_name, out_len, reinterpret_cast<const double**>(out_ptr))) {
*out_type = C_API_DTYPE_FLOAT64;
is_success = true;
}
if (!is_success) { Log::Fatal("Field not found"); }
if (*out_ptr == nullptr) { *out_len = 0; }
API_END();
}
int LGBM_DatasetUpdateParamChecking(const char* old_parameters, const char* new_parameters) {
API_BEGIN();
auto old_param = Config::Str2Map(old_parameters);
Config old_config;
old_config.Set(old_param);
auto new_param = Config::Str2Map(new_parameters);
Booster::CheckDatasetResetConfig(old_config, new_param);
API_END();
}
int LGBM_DatasetGetNumData(DatasetHandle handle,
int* out) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
*out = dataset->num_data();
API_END();
}
int LGBM_DatasetGetNumFeature(DatasetHandle handle,
int* out) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
*out = dataset->num_total_features();
API_END();
}
int LGBM_DatasetAddFeaturesFrom(DatasetHandle target,
DatasetHandle source) {
API_BEGIN();
auto target_d = reinterpret_cast<Dataset*>(target);
auto source_d = reinterpret_cast<Dataset*>(source);
target_d->AddFeaturesFrom(source_d);
API_END();
}
// ---- start of booster
int LGBM_BoosterCreate(const DatasetHandle train_data,
const char* parameters,
BoosterHandle* out) {
API_BEGIN();
const Dataset* p_train_data = reinterpret_cast<const Dataset*>(train_data);
auto ret = std::unique_ptr<Booster>(new Booster(p_train_data, parameters));
*out = ret.release();
API_END();
}
int LGBM_BoosterCreateFromModelfile(
const char* filename,
int* out_num_iterations,
BoosterHandle* out) {
API_BEGIN();
auto ret = std::unique_ptr<Booster>(new Booster(filename));
*out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
*out = ret.release();
API_END();
}
int LGBM_BoosterLoadModelFromString(
const char* model_str,
int* out_num_iterations,
BoosterHandle* out) {
API_BEGIN();
auto ret = std::unique_ptr<Booster>(new Booster(nullptr));
ret->LoadModelFromString(model_str);
*out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
*out = ret.release();
API_END();
}
#ifdef _MSC_VER
#pragma warning(disable : 4702)
#endif
int LGBM_BoosterFree(BoosterHandle handle) {
API_BEGIN();
delete reinterpret_cast<Booster*>(handle);
API_END();
}
int LGBM_BoosterShuffleModels(BoosterHandle handle, int start_iter, int end_iter) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->ShuffleModels(start_iter, end_iter);
API_END();
}
int LGBM_BoosterMerge(BoosterHandle handle,
BoosterHandle other_handle) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
Booster* ref_other_booster = reinterpret_cast<Booster*>(other_handle);
ref_booster->MergeFrom(ref_other_booster);
API_END();
}
int LGBM_BoosterAddValidData(BoosterHandle handle,
const DatasetHandle valid_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
const Dataset* p_dataset = reinterpret_cast<const Dataset*>(valid_data);
ref_booster->AddValidData(p_dataset);
API_END();
}
int LGBM_BoosterResetTrainingData(BoosterHandle handle,
const DatasetHandle train_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
const Dataset* p_dataset = reinterpret_cast<const Dataset*>(train_data);
ref_booster->ResetTrainingData(p_dataset);
API_END();
}
int LGBM_BoosterResetParameter(BoosterHandle handle, const char* parameters) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->ResetConfig(parameters);
API_END();
}
int LGBM_BoosterGetNumClasses(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetBoosting()->NumberOfClasses();
API_END();
}
int LGBM_BoosterGetLinear(BoosterHandle handle, bool* out) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out = ref_booster->GetBoosting()->IsLinear();
API_END();
}
int LGBM_BoosterRefit(BoosterHandle handle, const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->Refit(leaf_preds, nrow, ncol);
API_END();
}
int LGBM_BoosterUpdateOneIter(BoosterHandle handle, int* is_finished) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
if (ref_booster->TrainOneIter()) {
*is_finished = 1;
} else {
*is_finished = 0;
}
API_END();
}
int LGBM_BoosterUpdateOneIterCustom(BoosterHandle handle,
const float* grad,
const float* hess,
int* is_finished) {
API_BEGIN();
#ifdef SCORE_T_USE_DOUBLE
(void) handle; // UNUSED VARIABLE
(void) grad; // UNUSED VARIABLE
(void) hess; // UNUSED VARIABLE
(void) is_finished; // UNUSED VARIABLE
Log::Fatal("Don't support custom loss function when SCORE_T_USE_DOUBLE is enabled");
#else
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
if (ref_booster->TrainOneIter(grad, hess)) {
*is_finished = 1;
} else {
*is_finished = 0;
}
#endif
API_END();
}
int LGBM_BoosterRollbackOneIter(BoosterHandle handle) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->RollbackOneIter();
API_END();
}
int LGBM_BoosterGetCurrentIteration(BoosterHandle handle, int* out_iteration) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_iteration = ref_booster->GetBoosting()->GetCurrentIteration();
API_END();
}
int LGBM_BoosterNumModelPerIteration(BoosterHandle handle, int* out_tree_per_iteration) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_tree_per_iteration = ref_booster->GetBoosting()->NumModelPerIteration();
API_END();
}
int LGBM_BoosterNumberOfTotalModel(BoosterHandle handle, int* out_models) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_models = ref_booster->GetBoosting()->NumberOfTotalModel();
API_END();
}
int LGBM_BoosterGetEvalCounts(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetEvalCounts();
API_END();
}
int LGBM_BoosterGetEvalNames(BoosterHandle handle,
const int len,
int* out_len,
const size_t buffer_len,
size_t* out_buffer_len,
char** out_strs) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetEvalNames(out_strs, len, buffer_len, out_buffer_len);
API_END();
}
int LGBM_BoosterGetFeatureNames(BoosterHandle handle,
const int len,
int* out_len,
const size_t buffer_len,
size_t* out_buffer_len,
char** out_strs) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetFeatureNames(out_strs, len, buffer_len, out_buffer_len);
API_END();
}
int LGBM_BoosterGetNumFeature(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetBoosting()->MaxFeatureIdx() + 1;
API_END();
}
int LGBM_BoosterGetEval(BoosterHandle handle,
int data_idx,
int* out_len,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto boosting = ref_booster->GetBoosting();
auto result_buf = boosting->GetEvalAt(data_idx);
*out_len = static_cast<int>(result_buf.size());
for (size_t i = 0; i < result_buf.size(); ++i) {
(out_results)[i] = static_cast<double>(result_buf[i]);
}
API_END();
}
int LGBM_BoosterGetNumPredict(BoosterHandle handle,
int data_idx,
int64_t* out_len) {
API_BEGIN();
auto boosting = reinterpret_cast<Booster*>(handle)->GetBoosting();
*out_len = boosting->GetNumPredictAt(data_idx);
API_END();
}
int LGBM_BoosterGetPredict(BoosterHandle handle,
int data_idx,
int64_t* out_len,
double* out_result) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->GetPredictAt(data_idx, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForFile(BoosterHandle handle,
const char* data_filename,
int data_has_header,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
const char* result_filename) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->Predict(start_iteration, num_iteration, predict_type, data_filename, data_has_header,
config, result_filename);
API_END();
}
int LGBM_BoosterCalcNumPredict(BoosterHandle handle,
int num_row,
int predict_type,
int start_iteration,
int num_iteration,
int64_t* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = static_cast<int64_t>(num_row) * ref_booster->GetBoosting()->NumPredictOneRow(start_iteration,
num_iteration, predict_type == C_API_PREDICT_LEAF_INDEX, predict_type == C_API_PREDICT_CONTRIB);
API_END();
}
/*!
* \brief Object to store resources meant for single-row Fast Predict methods.
*
* Meant to be used as a basic struct by the *Fast* predict methods only.
* It stores the configuration resources for reuse during prediction.
*
* Even the row function is stored. We score the instance at the same memory
* address all the time. One just replaces the feature values at that address
* and scores again with the *Fast* methods.
*/
struct FastConfig {
FastConfig(Booster *const booster_ptr,
const char *parameter,
const int predict_type_,
const int data_type_,
const int32_t num_cols) : booster(booster_ptr), predict_type(predict_type_), data_type(data_type_), ncol(num_cols) {
config.Set(Config::Str2Map(parameter));
}
Booster* const booster;
Config config;
const int predict_type;
const int data_type;
const int32_t ncol;
};
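// NOTE (editorial sketch, not part of the original file): the doc comment above
// describes how FastConfig caches resources for the single-row *Fast* predict
// path. A minimal intended call sequence, using only functions defined elsewhere
// in this file (LGBM_BoosterPredictForMatSingleRowFastInit,
// LGBM_BoosterPredictForMatSingleRowFast, LGBM_FastConfigFree), would look
// roughly like the snippet below. `booster`, `ncol` and the feature buffer are
// caller-side assumptions for illustration only; the sketch is kept as a comment
// so it does not alter the compiled code.
//
//   FastConfigHandle fast_cfg = nullptr;
//   LGBM_BoosterPredictForMatSingleRowFastInit(booster, C_API_PREDICT_NORMAL,
//                                              /*start_iteration=*/0,
//                                              /*num_iteration=*/-1,
//                                              C_API_DTYPE_FLOAT64, ncol, "",
//                                              &fast_cfg);
//   std::vector<double> row(ncol);  // reused buffer: overwrite values, score again
//   double out_result[1];           // assumes a single-output model
//   int64_t out_len = 0;
//   LGBM_BoosterPredictForMatSingleRowFast(fast_cfg, row.data(), &out_len, out_result);
//   LGBM_FastConfigFree(fast_cfg);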
int LGBM_FastConfigFree(FastConfigHandle fastConfig) {
API_BEGIN();
delete reinterpret_cast<FastConfig*>(fastConfig);
API_END();
}
int LGBM_BoosterPredictForCSR(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int nrow = static_cast<int>(nindptr - 1);
ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, static_cast<int>(num_col), get_row_fun,
config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictSparseOutput(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col_or_row,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int matrix_type,
int64_t* out_len,
void** out_indptr,
int32_t** out_indices,
void** out_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
if (matrix_type == C_API_MATRIX_TYPE_CSR) {
if (num_col_or_row <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col_or_row >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto get_row_fun = RowFunctionFromCSR<int64_t>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int64_t nrow = nindptr - 1;
ref_booster->PredictSparseCSR(start_iteration, num_iteration, predict_type, nrow, static_cast<int>(num_col_or_row), get_row_fun,
config, out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
} else if (matrix_type == C_API_MATRIX_TYPE_CSC) {
int num_threads = OMP_NUM_THREADS();
int ncol = static_cast<int>(nindptr - 1);
std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
for (int i = 0; i < num_threads; ++i) {
for (int j = 0; j < ncol; ++j) {
iterators[i].emplace_back(indptr, indptr_type, indices, data, data_type, nindptr, nelem, j);
}
}
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun =
[&iterators, ncol](int64_t i) {
std::vector<std::pair<int, double>> one_row;
one_row.reserve(ncol);
const int tid = omp_get_thread_num();
for (int j = 0; j < ncol; ++j) {
auto val = iterators[tid][j].Get(static_cast<int>(i));
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
one_row.emplace_back(j, val);
}
}
return one_row;
};
ref_booster->PredictSparseCSC(start_iteration, num_iteration, predict_type, num_col_or_row, ncol, get_row_fun, config,
out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
} else {
Log::Fatal("Unknown matrix type in LGBM_BoosterPredictSparseOutput");
}
API_END();
}
int LGBM_BoosterFreePredictSparse(void* indptr, int32_t* indices, void* data, int indptr_type, int data_type) {
API_BEGIN();
if (indptr_type == C_API_DTYPE_INT32) {
delete reinterpret_cast<int32_t*>(indptr);
} else if (indptr_type == C_API_DTYPE_INT64) {
delete reinterpret_cast<int64_t*>(indptr);
} else {
Log::Fatal("Unknown indptr type in LGBM_BoosterFreePredictSparse");
}
delete indices;
if (data_type == C_API_DTYPE_FLOAT32) {
delete reinterpret_cast<float*>(data);
} else if (data_type == C_API_DTYPE_FLOAT64) {
delete reinterpret_cast<double*>(data);
} else {
Log::Fatal("Unknown data type in LGBM_BoosterFreePredictSparse");
}
API_END();
}
int LGBM_BoosterPredictForCSRSingleRow(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
ref_booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, config);
ref_booster->PredictSingleRow(predict_type, static_cast<int32_t>(num_col), get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForCSRSingleRowFastInit(BoosterHandle handle,
const int predict_type,
const int start_iteration,
const int num_iteration,
const int data_type,
const int64_t num_col,
const char* parameter,
FastConfigHandle *out_fastConfig) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto fastConfig_ptr = std::unique_ptr<FastConfig>(new FastConfig(
reinterpret_cast<Booster*>(handle),
parameter,
predict_type,
data_type,
static_cast<int32_t>(num_col)));
if (fastConfig_ptr->config.num_threads > 0) {
omp_set_num_threads(fastConfig_ptr->config.num_threads);
}
fastConfig_ptr->booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, fastConfig_ptr->config);
*out_fastConfig = fastConfig_ptr.release();
API_END();
}
int LGBM_BoosterPredictForCSRSingleRowFast(FastConfigHandle fastConfig_handle,
const void* indptr,
const int indptr_type,
const int32_t* indices,
const void* data,
const int64_t nindptr,
const int64_t nelem,
int64_t* out_len,
double* out_result) {
API_BEGIN();
FastConfig *fastConfig = reinterpret_cast<FastConfig*>(fastConfig_handle);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, fastConfig->data_type, nindptr, nelem);
fastConfig->booster->PredictSingleRow(fastConfig->predict_type, fastConfig->ncol,
get_row_fun, fastConfig->config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForCSC(BoosterHandle handle,
const void* col_ptr,
int col_ptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t ncol_ptr,
int64_t nelem,
int64_t num_row,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
int num_threads = OMP_NUM_THREADS();
int ncol = static_cast<int>(ncol_ptr - 1);
std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
for (int i = 0; i < num_threads; ++i) {
for (int j = 0; j < ncol; ++j) {
iterators[i].emplace_back(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, j);
}
}
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun =
[&iterators, ncol](int i) {
std::vector<std::pair<int, double>> one_row;
one_row.reserve(ncol);
const int tid = omp_get_thread_num();
for (int j = 0; j < ncol; ++j) {
auto val = iterators[tid][j].Get(i);
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
one_row.emplace_back(j, val);
}
}
return one_row;
};
ref_booster->Predict(start_iteration, num_iteration, predict_type, static_cast<int>(num_row), ncol, get_row_fun, config,
out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMat(BoosterHandle handle,
const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int is_row_major,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseMatric(data, nrow, ncol, data_type, is_row_major);
ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun,
config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMatSingleRow(BoosterHandle handle,
const void* data,
int data_type,
int32_t ncol,
int is_row_major,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseMatric(data, 1, ncol, data_type, is_row_major);
ref_booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, config);
ref_booster->PredictSingleRow(predict_type, ncol, get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMatSingleRowFastInit(BoosterHandle handle,
const int predict_type,
const int start_iteration,
const int num_iteration,
const int data_type,
const int32_t ncol,
const char* parameter,
FastConfigHandle *out_fastConfig) {
API_BEGIN();
auto fastConfig_ptr = std::unique_ptr<FastConfig>(new FastConfig(
reinterpret_cast<Booster*>(handle),
parameter,
predict_type,
data_type,
ncol));
if (fastConfig_ptr->config.num_threads > 0) {
omp_set_num_threads(fastConfig_ptr->config.num_threads);
}
fastConfig_ptr->booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, fastConfig_ptr->config);
*out_fastConfig = fastConfig_ptr.release();
API_END();
}
int LGBM_BoosterPredictForMatSingleRowFast(FastConfigHandle fastConfig_handle,
const void* data,
int64_t* out_len,
double* out_result) {
API_BEGIN();
FastConfig *fastConfig = reinterpret_cast<FastConfig*>(fastConfig_handle);
// Single row in row-major format:
auto get_row_fun = RowPairFunctionFromDenseMatric(data, 1, fastConfig->ncol, fastConfig->data_type, 1);
fastConfig->booster->PredictSingleRow(fastConfig->predict_type, fastConfig->ncol,
get_row_fun, fastConfig->config,
out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMats(BoosterHandle handle,
const void** data,
int data_type,
int32_t nrow,
int32_t ncol,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseRows(data, ncol, data_type);
ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterSaveModel(BoosterHandle handle,
int start_iteration,
int num_iteration,
int feature_importance_type,
const char* filename) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->SaveModelToFile(start_iteration, num_iteration,
feature_importance_type, filename);
API_END();
}
int LGBM_BoosterSaveModelToString(BoosterHandle handle,
int start_iteration,
int num_iteration,
int feature_importance_type,
int64_t buffer_len,
int64_t* out_len,
char* out_str) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::string model = ref_booster->SaveModelToString(
start_iteration, num_iteration, feature_importance_type);
*out_len = static_cast<int64_t>(model.size()) + 1;
if (*out_len <= buffer_len) {
std::memcpy(out_str, model.c_str(), *out_len);
}
API_END();
}
int LGBM_BoosterDumpModel(BoosterHandle handle,
int start_iteration,
int num_iteration,
int feature_importance_type,
int64_t buffer_len,
int64_t* out_len,
char* out_str) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::string model = ref_booster->DumpModel(start_iteration, num_iteration,
feature_importance_type);
*out_len = static_cast<int64_t>(model.size()) + 1;
if (*out_len <= buffer_len) {
std::memcpy(out_str, model.c_str(), *out_len);
}
API_END();
}
int LGBM_BoosterGetLeafValue(BoosterHandle handle,
int tree_idx,
int leaf_idx,
double* out_val) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_val = static_cast<double>(ref_booster->GetLeafValue(tree_idx, leaf_idx));
API_END();
}
int LGBM_BoosterSetLeafValue(BoosterHandle handle,
int tree_idx,
int leaf_idx,
double val) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->SetLeafValue(tree_idx, leaf_idx, val);
API_END();
}
int LGBM_BoosterFeatureImportance(BoosterHandle handle,
int num_iteration,
int importance_type,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::vector<double> feature_importances = ref_booster->FeatureImportance(num_iteration, importance_type);
for (size_t i = 0; i < feature_importances.size(); ++i) {
(out_results)[i] = feature_importances[i];
}
API_END();
}
int LGBM_BoosterGetUpperBoundValue(BoosterHandle handle,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
double max_value = ref_booster->UpperBoundValue();
*out_results = max_value;
API_END();
}
int LGBM_BoosterGetLowerBoundValue(BoosterHandle handle,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
double min_value = ref_booster->LowerBoundValue();
*out_results = min_value;
API_END();
}
int LGBM_NetworkInit(const char* machines,
int local_listen_port,
int listen_time_out,
int num_machines) {
API_BEGIN();
Config config;
config.machines = RemoveQuotationSymbol(std::string(machines));
config.local_listen_port = local_listen_port;
config.num_machines = num_machines;
config.time_out = listen_time_out;
if (num_machines > 1) {
Network::Init(config);
}
API_END();
}
int LGBM_NetworkFree() {
API_BEGIN();
Network::Dispose();
API_END();
}
int LGBM_NetworkInitWithFunctions(int num_machines, int rank,
void* reduce_scatter_ext_fun,
void* allgather_ext_fun) {
API_BEGIN();
if (num_machines > 1) {
Network::Init(num_machines, rank, (ReduceScatterFunction)reduce_scatter_ext_fun, (AllgatherFunction)allgather_ext_fun);
}
API_END();
}
// ---- start of some helper functions
template<typename T>
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric_helper(const void* data, int num_row, int num_col, int is_row_major) {
const T* data_ptr = reinterpret_cast<const T*>(data);
if (is_row_major) {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
auto tmp_ptr = data_ptr + static_cast<size_t>(num_col) * row_idx;
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(tmp_ptr + i));
}
return ret;
};
} else {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(data_ptr + static_cast<size_t>(num_row) * i + row_idx));
}
return ret;
};
}
}
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
if (data_type == C_API_DTYPE_FLOAT32) {
return RowFunctionFromDenseMatric_helper<float>(data, num_row, num_col, is_row_major);
} else if (data_type == C_API_DTYPE_FLOAT64) {
return RowFunctionFromDenseMatric_helper<double>(data, num_row, num_col, is_row_major);
}
Log::Fatal("Unknown data type in RowFunctionFromDenseMatric");
return nullptr;
}
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
auto inner_function = RowFunctionFromDenseMatric(data, num_row, num_col, data_type, is_row_major);
if (inner_function != nullptr) {
return [inner_function] (int row_idx) {
auto raw_values = inner_function(row_idx);
std::vector<std::pair<int, double>> ret;
ret.reserve(raw_values.size());
for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
ret.emplace_back(i, raw_values[i]);
}
}
return ret;
};
}
return nullptr;
}
// data is an array of pointers to individual rows
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type) {
return [=](int row_idx) {
auto inner_function = RowFunctionFromDenseMatric(data[row_idx], 1, num_col, data_type, /* is_row_major */ true);
auto raw_values = inner_function(0);
std::vector<std::pair<int, double>> ret;
ret.reserve(raw_values.size());
for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
ret.emplace_back(i, raw_values[i]);
}
}
return ret;
};
}
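// NOTE (editorial sketch, not part of the original file): an illustration of the
// "array of pointers to individual rows" layout consumed by the helper above, as
// a caller of LGBM_BoosterPredictForMats (defined earlier in this file) might
// build it. `booster`, `ncol` and the row contents are assumptions for the
// example only; shown as a comment so it does not alter the compiled code.
//
//   std::vector<double> row0(ncol, 0.0), row1(ncol, 0.0);
//   const void* rows[2] = {row0.data(), row1.data()};
//   int64_t out_len = 0;
//   std::vector<double> out_result(2);  // one value per row for a single-output model
//   LGBM_BoosterPredictForMats(booster, rows, C_API_DTYPE_FLOAT64,
//                              /*nrow=*/2, ncol, C_API_PREDICT_NORMAL,
//                              /*start_iteration=*/0, /*num_iteration=*/-1,
//                              "", &out_len, out_result.data());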
template<typename T, typename T1, typename T2>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR_helper(const void* indptr, const int32_t* indices, const void* data) {
const T1* data_ptr = reinterpret_cast<const T1*>(data);
const T2* ptr_indptr = reinterpret_cast<const T2*>(indptr);
return [=] (T idx) {
std::vector<std::pair<int, double>> ret;
int64_t start = ptr_indptr[idx];
int64_t end = ptr_indptr[idx + 1];
if (end - start > 0) {
ret.reserve(end - start);
}
for (int64_t i = start; i < end; ++i) {
ret.emplace_back(indices[i], data_ptr[i]);
}
return ret;
};
}
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices, const void* data, int data_type, int64_t , int64_t ) {
if (data_type == C_API_DTYPE_FLOAT32) {
if (indptr_type == C_API_DTYPE_INT32) {
return RowFunctionFromCSR_helper<T, float, int32_t>(indptr, indices, data);
} else if (indptr_type == C_API_DTYPE_INT64) {
return RowFunctionFromCSR_helper<T, float, int64_t>(indptr, indices, data);
}
} else if (data_type == C_API_DTYPE_FLOAT64) {
if (indptr_type == C_API_DTYPE_INT32) {
return RowFunctionFromCSR_helper<T, double, int32_t>(indptr, indices, data);
} else if (indptr_type == C_API_DTYPE_INT64) {
return RowFunctionFromCSR_helper<T, double, int64_t>(indptr, indices, data);
}
}
Log::Fatal("Unknown data type in RowFunctionFromCSR");
return nullptr;
}
template <typename T1, typename T2>
std::function<std::pair<int, double>(int idx)> IterateFunctionFromCSC_helper(const void* col_ptr, const int32_t* indices, const void* data, int col_idx) {
const T1* data_ptr = reinterpret_cast<const T1*>(data);
const T2* ptr_col_ptr = reinterpret_cast<const T2*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
return [=] (int offset) {
int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
int idx = static_cast<int>(indices[i]);
double val = static_cast<double>(data_ptr[i]);
return std::make_pair(idx, val);
};
}
std::function<std::pair<int, double>(int idx)>
IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indices, const void* data, int data_type, int64_t ncol_ptr, int64_t , int col_idx) {
CHECK(col_idx < ncol_ptr && col_idx >= 0);
if (data_type == C_API_DTYPE_FLOAT32) {
if (col_ptr_type == C_API_DTYPE_INT32) {
return IterateFunctionFromCSC_helper<float, int32_t>(col_ptr, indices, data, col_idx);
} else if (col_ptr_type == C_API_DTYPE_INT64) {
return IterateFunctionFromCSC_helper<float, int64_t>(col_ptr, indices, data, col_idx);
}
} else if (data_type == C_API_DTYPE_FLOAT64) {
if (col_ptr_type == C_API_DTYPE_INT32) {
return IterateFunctionFromCSC_helper<double, int32_t>(col_ptr, indices, data, col_idx);
} else if (col_ptr_type == C_API_DTYPE_INT64) {
return IterateFunctionFromCSC_helper<double, int64_t>(col_ptr, indices, data, col_idx);
}
}
Log::Fatal("Unknown data type in CSC matrix");
return nullptr;
}
CSC_RowIterator::CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx) {
iter_fun_ = IterateFunctionFromCSC(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, col_idx);
}
double CSC_RowIterator::Get(int idx) {
while (idx > cur_idx_ && !is_end_) {
auto ret = iter_fun_(nonzero_idx_);
if (ret.first < 0) {
is_end_ = true;
break;
}
cur_idx_ = ret.first;
cur_val_ = ret.second;
++nonzero_idx_;
}
if (idx == cur_idx_) {
return cur_val_;
} else {
return 0.0f;
}
}
std::pair<int, double> CSC_RowIterator::NextNonZero() {
if (!is_end_) {
auto ret = iter_fun_(nonzero_idx_);
++nonzero_idx_;
if (ret.first < 0) {
is_end_ = true;
}
return ret;
} else {
return std::make_pair(-1, 0.0);
}
}
| 1 | 31,625 | @shiyu1994 @StrikerRUS what do you think about this addition to `c_api.cpp`? I think it's a really useful addition to be able to get this type of information from the `Booster`, but I want more opinions since `c_api` is the main public API for the library. | microsoft-LightGBM | cpp |
@@ -213,6 +213,16 @@ type deferedCommit struct {
lookback basics.Round
}
+// RoundOffsetError is the error returned when the requested round is behind the earliest stored db entry
+type RoundOffsetError struct {
+ Round basics.Round
+ DbRound basics.Round
+}
+
+func (e *RoundOffsetError) Error() string {
+ return fmt.Sprintf("round %d before dbRound %d", e.Round, e.DbRound)
+}
+
// initialize initializes the accountUpdates structure
func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, genesisProto config.ConsensusParams, genesisAccounts map[basics.Address]basics.AccountData) {
au.initProto = genesisProto | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"container/heap"
"context"
"database/sql"
"encoding/hex"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
const (
// balancesFlushInterval defines how frequently we want to flush our balances to disk.
balancesFlushInterval = 5 * time.Second
// pendingDeltasFlushThreshold is the deltas count threshold above we flush the pending balances regardless of the flush interval.
pendingDeltasFlushThreshold = 128
// trieRebuildAccountChunkSize defines the number of accounts that would get read at a single chunk
// before added to the trie during trie construction
trieRebuildAccountChunkSize = 16384
// trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
trieRebuildCommitFrequency = 65536
// trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
// we attempt to commit them to disk while writing a batch of rounds balances to disk.
trieAccumulatedChangesFlush = 256
)
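// Editorial sketch (assumption, not part of the original file): the two flush knobs
// above are described as "flush every balancesFlushInterval" and "flush regardless of
// the interval once pendingDeltasFlushThreshold deltas are pending", which combines
// into a decision of roughly the following shape; `pendingDeltas` is a placeholder
// name used only for this illustration.
//
//	if time.Since(au.lastFlushTime) > balancesFlushInterval ||
//		pendingDeltas >= pendingDeltasFlushThreshold {
//		// schedule a commit of the pending account deltas to disk
//	}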
// trieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory.
// value was calibrated using BenchmarkCalibrateCacheNodeSize
var trieCachedNodesCount = 9000
// A modifiedAccount represents an account that has been modified since
// the persistent state stored in the account DB (i.e., in the range of
// rounds covered by the accountUpdates tracker).
type modifiedAccount struct {
// data stores the most recent AccountData for this modified
// account.
data basics.AccountData
// ndelta keeps track of how many times this account appears in
// accountUpdates.deltas. This is used to evict modifiedAccount
// entries when all changes to an account have been reflected in
// the account DB, and no outstanding modifications remain.
ndeltas int
}
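// Editorial sketch (assumption, not part of the original file): the eviction rule
// described in the comment above amounts to bookkeeping of the following shape when
// one round's deltas for an address are written to the account DB; the field and map
// names are the ones declared in this file, the surrounding logic is illustrative only.
//
//	macct := au.accounts[addr]
//	macct.ndeltas-- // one in-memory delta for addr has been reflected in the account DB
//	if macct.ndeltas == 0 {
//		delete(au.accounts, addr) // no outstanding modifications remain
//	} else {
//		au.accounts[addr] = macct
//	}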
type modifiedCreatable struct {
// Type of the creatable: app or asset
ctype basics.CreatableType
// Created if true, deleted if false
created bool
// creator of the app/asset
creator basics.Address
// Keeps track of how many times this app/asset appears in
// accountUpdates.creatableDeltas
ndeltas int
}
type accountUpdates struct {
// constant variables ( initialized on initialize, and never changed afterward )
// initAccounts specifies initial account values for database.
initAccounts map[basics.Address]basics.AccountData
// initProto specifies the initial consensus parameters.
initProto config.ConsensusParams
// dbDirectory is the directory where the ledger and block sql file resides as well as the parent directory for the catchup files to be generated
dbDirectory string
// catchpointInterval is the configured interval at which the accountUpdates would generate catchpoint labels and catchpoint files.
catchpointInterval uint64
// archivalLedger determines whether the associated ledger was configured as an archival ledger or not.
archivalLedger bool
// catchpointFileHistoryLength defines how many catchpoint files we want to store back.
// 0 means don't store any, -1 means unlimited, and a positive number specifies the number of most recent catchpoint files to keep.
catchpointFileHistoryLength int
// vacuumOnStartup controls whether the accounts database would get vacuumed on startup.
vacuumOnStartup bool
// dynamic variables
// Connection to the database.
dbs dbPair
// Prepared SQL statements for fast accounts DB lookups.
accountsq *accountsDbQueries
// dbRound is always exactly accountsRound(),
// cached to avoid SQL queries.
dbRound basics.Round
// deltas stores updates for every round after dbRound.
deltas []map[basics.Address]accountDelta
// accounts stores the most recent account state for every
// address that appears in deltas.
accounts map[basics.Address]modifiedAccount
// creatableDeltas stores creatable updates for every round after dbRound.
creatableDeltas []map[basics.CreatableIndex]modifiedCreatable
// creatables stores the most recent state for every creatable that
// appears in creatableDeltas
creatables map[basics.CreatableIndex]modifiedCreatable
// protos stores consensus parameters for dbRound and every
// round after it; i.e., protos is one longer than deltas.
protos []config.ConsensusParams
// totals stores the totals for dbRound and every round after it;
// i.e., totals is one longer than deltas.
roundTotals []AccountTotals
// roundDigest stores the digest of the block for every round starting with dbRound and every round after it.
roundDigest []crypto.Digest
// log copied from ledger
log logging.Logger
// lastFlushTime is the time we last flushed updates to
// the accounts DB (bumping dbRound).
lastFlushTime time.Time
// ledger is the source ledger, which is used to synchronize
// the rounds at which we need to flush the balances to disk
// in favor of the catchpoint to be generated.
ledger ledgerForTracker
// The Trie tracking the current account balances. Always matches the balances that were
// written to the database.
balancesTrie *merkletrie.Trie
// The last catchpoint label that was written to the database. Should always align with what's in the database.
// note that this is the last catchpoint *label* and not the catchpoint file.
lastCatchpointLabel string
// catchpointWriting helps to synchronize the catchpoint file writing. When this channel is closed, no writing is going on.
// the channel is kept open while writing the current accounts state to disk.
catchpointWriting chan struct{}
// catchpointSlowWriting suggests to the accounts writer that it should finish writing up the catchpoint file ASAP.
// when this channel is closed, the accounts writer would try to complete the writing as soon as possible.
// otherwise, it would take its time and perform periodic sleeps between processing chunks.
catchpointSlowWriting chan struct{}
// ctx is the context for the committing go-routine. It's also used as the "parent" of the catchpoint generation operation.
ctx context.Context
// ctxCancel is the cancel function for the committing go-routine (i.e. signaling the committing go-routine that it's time to abort)
ctxCancel context.CancelFunc
// deltasAccum stores the accumulated deltas for every round starting at dbRound-1.
deltasAccum []int
// committedOffset is the offset at which we'd like to persist all the previous account information to disk.
committedOffset chan deferedCommit
// accountsMu is the synchronization mutex for accessing the various non-static variables.
accountsMu deadlock.RWMutex
// accountsWriting provides synchronization around the background writing of account balances.
accountsWriting sync.WaitGroup
// commitSyncerClosed is the blocking channel for synchronizing closing the commitSyncer goroutine. Once it's closed, the
// commitSyncer can be assumed to have aborted.
commitSyncerClosed chan struct{}
}
type deferedCommit struct {
offset uint64
dbRound basics.Round
lookback basics.Round
}
// initialize initializes the accountUpdates structure
func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, genesisProto config.ConsensusParams, genesisAccounts map[basics.Address]basics.AccountData) {
au.initProto = genesisProto
au.initAccounts = genesisAccounts
au.dbDirectory = filepath.Dir(dbPathPrefix)
au.archivalLedger = cfg.Archival
switch cfg.CatchpointTracking {
case -1:
au.catchpointInterval = 0
default:
// give a warning, then fall through
logging.Base().Warnf("accountUpdates: the CatchpointTracking field in the config.json file contains an invalid value (%d). The default value of 0 would be used instead.", cfg.CatchpointTracking)
fallthrough
case 0:
if au.archivalLedger {
au.catchpointInterval = cfg.CatchpointInterval
} else {
au.catchpointInterval = 0
}
case 1:
au.catchpointInterval = cfg.CatchpointInterval
}
au.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength
if cfg.CatchpointFileHistoryLength < -1 {
au.catchpointFileHistoryLength = -1
}
au.vacuumOnStartup = cfg.OptimizeAccountsDatabaseOnStartup
// initialize the commitSyncerClosed with a closed channel ( since the commitSyncer go-routine is not active )
au.commitSyncerClosed = make(chan struct{})
close(au.commitSyncerClosed)
}
// loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional
// The close function is expected to be called in pair with loadFromDisk
func (au *accountUpdates) loadFromDisk(l ledgerForTracker) error {
au.accountsMu.Lock()
defer au.accountsMu.Unlock()
var writingCatchpointRound uint64
lastBalancesRound, lastestBlockRound, err := au.initializeFromDisk(l)
if err != nil {
return err
}
var writingCatchpointDigest crypto.Digest
writingCatchpointRound, _, err = au.accountsq.readCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint)
if err != nil {
return err
}
writingCatchpointDigest, err = au.initializeCaches(lastBalancesRound, lastestBlockRound, basics.Round(writingCatchpointRound))
if err != nil {
return err
}
if writingCatchpointRound != 0 && au.catchpointInterval != 0 {
au.generateCatchpoint(basics.Round(writingCatchpointRound), au.lastCatchpointLabel, writingCatchpointDigest, time.Duration(0))
}
return nil
}
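// Editorial sketch (assumption, not part of the original file): the pairing described
// above, initialize -> loadFromDisk -> ... -> close, looks roughly like this from the
// owning tracker's point of view; `cfg`, `dbPathPrefix`, `proto`, `accts` and `l` are
// placeholders used only for this illustration.
//
//	var au accountUpdates
//	au.initialize(cfg, dbPathPrefix, proto, accts)
//	if err := au.loadFromDisk(l); err != nil {
//		return err
//	}
//	defer au.close()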
// waitAccountsWriting waits for all the pending ( or current ) account writing to be completed.
func (au *accountUpdates) waitAccountsWriting() {
au.accountsWriting.Wait()
}
// close closes the accountUpdates, waiting for all the child go-routine to complete
func (au *accountUpdates) close() {
if au.ctxCancel != nil {
au.ctxCancel()
}
au.waitAccountsWriting()
// this would block until the commitSyncerClosed channel gets closed.
<-au.commitSyncerClosed
}
// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service
// to avoid memory pressure until the catchpoint file writing is complete.
func (au *accountUpdates) IsWritingCatchpointFile() bool {
au.accountsMu.Lock()
defer au.accountsMu.Unlock()
// if we're still writing the previous balances, we can't move forward yet.
select {
case <-au.catchpointWriting:
// the channel catchpointWriting is currently closed, meaning that we're currently not writing any
// catchpoint file.
return false
default:
return true
}
}
// Lookup returns the account data for a given address at a given round. The withRewards flag indicates whether the
// rewards should be added to the AccountData before returning. Note that the function doesn't update the account with the rewards,
// even while it could return the AccountData that represents the "rewarded" account data.
func (au *accountUpdates) Lookup(rnd basics.Round, addr basics.Address, withRewards bool) (data basics.AccountData, err error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.lookupImpl(rnd, addr, withRewards)
}
// ListAssets lists the assets by their asset index, limiting to the first maxResults
func (au *accountUpdates) ListAssets(maxAssetIdx basics.AssetIndex, maxResults uint64) ([]basics.CreatableLocator, error) {
return au.listCreatables(basics.CreatableIndex(maxAssetIdx), maxResults, basics.AssetCreatable)
}
// ListApplications lists the application by their app index, limiting to the first maxResults
func (au *accountUpdates) ListApplications(maxAppIdx basics.AppIndex, maxResults uint64) ([]basics.CreatableLocator, error) {
return au.listCreatables(basics.CreatableIndex(maxAppIdx), maxResults, basics.AppCreatable)
}
// listCreatables lists the application/asset by their app/asset index, limiting to the first maxResults
func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) ([]basics.CreatableLocator, error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
// Sort indices for creatables that have been created/deleted. If this
// turns out to be too inefficient, we could keep around a heap of
// created/deleted asset indices in memory.
keys := make([]basics.CreatableIndex, 0, len(au.creatables))
for cidx, delta := range au.creatables {
if delta.ctype != ctype {
continue
}
if cidx <= maxCreatableIdx {
keys = append(keys, cidx)
}
}
sort.Slice(keys, func(i, j int) bool { return keys[i] > keys[j] })
// Check for creatables that haven't been synced to disk yet.
var unsyncedCreatables []basics.CreatableLocator
deletedCreatables := make(map[basics.CreatableIndex]bool)
for _, cidx := range keys {
delta := au.creatables[cidx]
if delta.created {
// Created but only exists in memory
unsyncedCreatables = append(unsyncedCreatables, basics.CreatableLocator{
Type: delta.ctype,
Index: cidx,
Creator: delta.creator,
})
} else {
// Mark deleted creatables for exclusion from the results set
deletedCreatables[cidx] = true
}
}
// Check in-memory created creatables, which will always be newer than anything
// in the database
var res []basics.CreatableLocator
for _, loc := range unsyncedCreatables {
if uint64(len(res)) == maxResults {
return res, nil
}
res = append(res, loc)
}
// Fetch up to maxResults - len(res) + len(deletedCreatables) from the database,
// so we have enough extras in case creatables were deleted
numToFetch := maxResults - uint64(len(res)) + uint64(len(deletedCreatables))
dbResults, err := au.accountsq.listCreatables(maxCreatableIdx, numToFetch, ctype)
if err != nil {
return nil, err
}
// Now we merge the database results with the in-memory results
for _, loc := range dbResults {
// Check if we have enough results
if uint64(len(res)) == maxResults {
return res, nil
}
// Creatable was deleted
if _, ok := deletedCreatables[loc.Index]; ok {
continue
}
// We're OK to include this result
res = append(res, loc)
}
return res, nil
}
// onlineTop returns the top n online accounts, sorted by their normalized
// balance and address, whose voting keys are valid in voteRnd. See the
// normalization description in AccountData.NormalizedOnlineBalance().
func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*onlineAccount, error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
offset, err := au.roundOffset(rnd)
if err != nil {
return nil, err
}
proto := au.protos[offset]
// Determine how many accounts have been modified in-memory,
// so that we obtain enough top accounts from disk (accountdb).
// If the *onlineAccount is nil, that means the account is offline
// as of the most recent change to that account, or its vote key
// is not valid in voteRnd. Otherwise, the *onlineAccount is the
// representation of the most recent state of the account, and it
// is online and can vote in voteRnd.
modifiedAccounts := make(map[basics.Address]*onlineAccount)
for o := uint64(0); o < offset; o++ {
for addr, d := range au.deltas[o] {
if d.new.Status != basics.Online {
modifiedAccounts[addr] = nil
continue
}
if !(d.new.VoteFirstValid <= voteRnd && voteRnd <= d.new.VoteLastValid) {
modifiedAccounts[addr] = nil
continue
}
modifiedAccounts[addr] = accountDataToOnline(addr, &d.new, proto)
}
}
// Build up a set of candidate accounts. Start by loading the
// top N + len(modifiedAccounts) accounts from disk (accountdb).
// This ensures that, even in the worst case where all in-memory
// changes are deleting the top accounts in accountdb, we still
// will have top N left.
//
// Keep asking for more accounts until we get the desired number,
// or there are no more accounts left.
candidates := make(map[basics.Address]*onlineAccount)
batchOffset := uint64(0)
batchSize := uint64(1024)
for uint64(len(candidates)) < n+uint64(len(modifiedAccounts)) {
var accts map[basics.Address]*onlineAccount
err = au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
accts, err = accountsOnlineTop(tx, batchOffset, batchSize, proto)
return
})
if err != nil {
return nil, err
}
for addr, data := range accts {
if !(data.VoteFirstValid <= voteRnd && voteRnd <= data.VoteLastValid) {
continue
}
candidates[addr] = data
}
// If we got fewer than batchSize accounts, there are no
// more accounts to look at.
if uint64(len(accts)) < batchSize {
break
}
batchOffset += batchSize
}
// Now update the candidates based on the in-memory deltas.
for addr, oa := range modifiedAccounts {
if oa == nil {
delete(candidates, addr)
} else {
candidates[addr] = oa
}
}
// Get the top N accounts from the candidate set, by inserting all of
// the accounts into a heap and then pulling out N elements from the
// heap.
topHeap := &onlineTopHeap{
accts: nil,
}
for _, data := range candidates {
heap.Push(topHeap, data)
}
var res []*onlineAccount
for topHeap.Len() > 0 && uint64(len(res)) < n {
acct := heap.Pop(topHeap).(*onlineAccount)
res = append(res, acct)
}
return res, nil
}
// GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database.
func (au *accountUpdates) GetLastCatchpointLabel() string {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.lastCatchpointLabel
}
// GetCreatorForRound returns the creator for a given asset/app index at a given round
func (au *accountUpdates) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.getCreatorForRoundImpl(rnd, cidx, ctype)
}
// committedUpTo enqueues committing the balances for round committedRound-lookback.
// The deferred committing is done so that we could calculate the historical balances lookback rounds back.
// Since we don't want to hold off the tracker's mutex for too long, we'll defer the database persistence of this
// operation to a syncer goroutine. The one caveat is that when storing a catchpoint round, we would want to
// wait until the catchpoint creation is done, so that the persistence of the catchpoint file would have an
// uninterrupted view of the balances at a given point of time.
func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound basics.Round) {
var isCatchpointRound, hasMultipleIntermediateCatchpoint bool
var offset uint64
var dc deferedCommit
au.accountsMu.RLock()
defer func() {
au.accountsMu.RUnlock()
if dc.offset != 0 {
au.committedOffset <- dc
}
}()
retRound = basics.Round(0)
var pendingDeltas int
lookback := basics.Round(au.protos[len(au.protos)-1].MaxBalLookback)
if committedRound < lookback {
return
}
retRound = au.dbRound
newBase := committedRound - lookback
if newBase <= au.dbRound {
// Already forgotten
return
}
if newBase > au.dbRound+basics.Round(len(au.deltas)) {
au.log.Panicf("committedUpTo: block %d too far in the future, lookback %d, dbRound %d, deltas %d", committedRound, lookback, au.dbRound, len(au.deltas))
}
hasIntermediateCatchpoint := false
hasMultipleIntermediateCatchpoint = false
// check if there was a catchpoint between au.dbRound+lookback and newBase+lookback
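// Illustrative example (assumed values, not from the original source): with au.dbRound = 900,
// lookback = 320 and catchpointInterval = 1000, nextCatchpointRound below evaluates to
// ((1220+1000)/1000)*1000 = 2000, i.e. the first multiple of catchpointInterval strictly
// above au.dbRound+lookback.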
if au.catchpointInterval > 0 {
nextCatchpointRound := ((uint64(au.dbRound+lookback) + au.catchpointInterval) / au.catchpointInterval) * au.catchpointInterval
if nextCatchpointRound < uint64(newBase+lookback) {
mostRecentCatchpointRound := (uint64(committedRound) / au.catchpointInterval) * au.catchpointInterval
newBase = basics.Round(nextCatchpointRound) - lookback
if mostRecentCatchpointRound > nextCatchpointRound {
hasMultipleIntermediateCatchpoint = true
// skip if there is more than one catchpoint in queue
newBase = basics.Round(mostRecentCatchpointRound) - lookback
}
hasIntermediateCatchpoint = true
}
}
// if we're still writing the previous balances, we can't move forward yet.
select {
case <-au.catchpointWriting:
// the channel catchpointWriting is currently closed, meaning that we're currently not writing any
// catchpoint file. At this point, we should attempt to enqueue further tasks as usual.
default:
// if we hit this path, it means that the channel is currently non-closed, which means that we're still writing a catchpoint.
// see if we're writing a catchpoint in that range.
if hasIntermediateCatchpoint {
// check if we're already attempting to perform fast-writing.
select {
case <-au.catchpointSlowWriting:
// yes, we're already doing fast-writing.
default:
// no, we're not yet doing fast writing, make it so.
close(au.catchpointSlowWriting)
}
}
return
}
offset = uint64(newBase - au.dbRound)
// check to see if this is a catchpoint round
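// Illustrative example (assumed values, continuing the one above): with dbRound = 900,
// lookback = 320 and newBase = 1680, offset = 780 and offset+lookback+dbRound = 2000,
// which is non-zero and divisible by a 1000-round catchpointInterval, so this counts as
// a catchpoint round.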
isCatchpointRound = ((offset + uint64(lookback+au.dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+au.dbRound))) % au.catchpointInterval))
// calculate the number of pending deltas
pendingDeltas = au.deltasAccum[offset] - au.deltasAccum[0]
// If we recently flushed, wait to aggregate some more blocks.
// ( unless we're creating a catchpoint, in which case we want to flush it right away
// so that all the instances of the catchpoint would contain the exact same data )
flushTime := time.Now()
if !flushTime.After(au.lastFlushTime.Add(balancesFlushInterval)) && !isCatchpointRound && pendingDeltas < pendingDeltasFlushThreshold {
return au.dbRound
}
if isCatchpointRound && au.archivalLedger {
au.catchpointWriting = make(chan struct{}, 1)
au.catchpointSlowWriting = make(chan struct{}, 1)
if hasMultipleIntermediateCatchpoint {
close(au.catchpointSlowWriting)
}
}
dc = deferedCommit{
offset: offset,
dbRound: au.dbRound,
lookback: lookback,
}
au.accountsWriting.Add(1)
return
}
// newBlock is the accountUpdates implementation of the ledgerTracker interface. This is the "external" facing function
// which invokes the internal implementation after taking the lock.
func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta StateDelta) {
au.accountsMu.Lock()
defer au.accountsMu.Unlock()
au.newBlockImpl(blk, delta)
}
// Totals returns the totals for a given round
func (au *accountUpdates) Totals(rnd basics.Round) (totals AccountTotals, err error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.totalsImpl(rnd)
}
// GetCatchpointStream returns an io.Reader to the catchpoint file associated with the provided round
func (au *accountUpdates) GetCatchpointStream(round basics.Round) (io.ReadCloser, error) {
dbFileName := ""
err := au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
dbFileName, _, _, err = getCatchpoint(tx, round)
return
})
if err != nil && err != sql.ErrNoRows {
// we had some sql error.
return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err)
}
if dbFileName != "" {
catchpointPath := filepath.Join(au.dbDirectory, dbFileName)
file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
if err == nil && file != nil {
return file, nil
}
// else, see if this is a file-not-found error
if os.IsNotExist(err) {
// the database told us that we have this file, but we couldn't find it.
// delete it from the database.
err := au.saveCatchpointFile(round, "", 0, "")
if err != nil {
au.log.Warnf("accountUpdates: getCatchpointStream: unable to delete missing catchpoint entry: %v", err)
return nil, err
}
return nil, ErrNoEntry{}
}
// it's some other error.
return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to open catchpoint file '%s' %v", catchpointPath, err)
}
// if the database doesn't know about that round, see if we have that file anyway:
fileName := filepath.Join("catchpoints", catchpointRoundToPath(round))
catchpointPath := filepath.Join(au.dbDirectory, fileName)
file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
if err == nil && file != nil {
// great, we found a file that we should have had in the database; add it now:
fileInfo, err := file.Stat()
if err != nil {
// we couldn't get the stat, so just return with the file.
return file, nil
}
err = au.saveCatchpointFile(round, fileName, fileInfo.Size(), "")
if err != nil {
au.log.Warnf("accountUpdates: getCatchpointStream: unable to save missing catchpoint entry: %v", err)
}
return file, nil
}
return nil, ErrNoEntry{}
}
// functions below this line are all internal functions
// accountUpdatesLedgerEvaluator is a "ledger emulator" which is used *only* by initializeCaches, as a way to shortcut
// the locks taken by the real ledger object when making requests that are being served by the accountUpdates.
// Using this struct allows us to take the tracker lock *before* calling loadFromDisk, and have the operation complete
// without taking any locks. Note that it's not only the lock performance that is gained: by having loadFromDisk
// not require any external locks, we can safely take a tracker's lock on the ledger during reloadLedger, which ensures
// that even during a catchpoint catchup mode switch, we're still correctly protected by a mutex.
type accountUpdatesLedgerEvaluator struct {
// au is the associated accountUpdates structure, which invokes the trackerEvalVerified function, passing this structure as input.
// the accountUpdatesLedgerEvaluator would access the underlying accountUpdates function directly, bypassing the balances mutex lock.
au *accountUpdates
// prevHeader is the previous header to the current one. The usage of this is only in the context of initializeCaches where we iteratively
// build the StateDelta, which requires a peek at the "previous" header information.
prevHeader bookkeeping.BlockHeader
}
// GenesisHash returns the genesis hash
func (aul *accountUpdatesLedgerEvaluator) GenesisHash() crypto.Digest {
return aul.au.ledger.GenesisHash()
}
// BlockHdr returns the header of the given round. When the evaluator is running, it's only referring to the previous header, which is what we
// are providing here. Any attempt to access a different header would get denied.
func (aul *accountUpdatesLedgerEvaluator) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
if r == aul.prevHeader.Round {
return aul.prevHeader, nil
}
return bookkeeping.BlockHeader{}, ErrNoEntry{}
}
// Lookup returns the account balance for a given address at a given round
func (aul *accountUpdatesLedgerEvaluator) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
return aul.au.lookupImpl(rnd, addr, true)
}
// Totals returns the totals for a given round
func (aul *accountUpdatesLedgerEvaluator) Totals(rnd basics.Round) (AccountTotals, error) {
return aul.au.totalsImpl(rnd)
}
// isDup returns whether a transaction is a duplicate one. It's not needed by the accountUpdatesLedgerEvaluator and is implemented as a stub.
func (aul *accountUpdatesLedgerEvaluator) isDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, txlease) (bool, error) {
// this is a non-issue since this call will never be made on non-validating evaluation
return false, fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initilization ")
}
// LookupWithoutRewards returns the account balance for a given address at a given round, without the reward
func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
return aul.au.lookupImpl(rnd, addr, false)
}
// GetCreatorForRound returns the asset/app creator for a given asset/app index at a given round
func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return aul.au.getCreatorForRoundImpl(rnd, cidx, ctype)
}
// totalsImpl returns the totals for a given round
func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals AccountTotals, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return
}
totals = au.roundTotals[offset]
return
}
// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks
func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, writingCatchpointRound basics.Round) (catchpointBlockDigest crypto.Digest, err error) {
var blk bookkeeping.Block
var delta StateDelta
accLedgerEval := accountUpdatesLedgerEvaluator{
au: au,
}
if lastBalancesRound < lastestBlockRound {
accLedgerEval.prevHeader, err = au.ledger.BlockHdr(lastBalancesRound)
if err != nil {
return
}
}
for lastBalancesRound < lastestBlockRound {
next := lastBalancesRound + 1
blk, err = au.ledger.Block(next)
if err != nil {
return
}
delta, err = au.ledger.trackerEvalVerified(blk, &accLedgerEval)
if err != nil {
return
}
au.newBlockImpl(blk, delta)
lastBalancesRound = next
if next == basics.Round(writingCatchpointRound) {
catchpointBlockDigest = blk.Digest()
}
accLedgerEval.prevHeader = *delta.hdr
}
return
}
// initializeFromDisk performs the atomic operation of loading the accounts data information from disk
// and preparing the accountUpdates for operation, including initializing the commitSyncer goroutine.
func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRound, lastestBlockRound basics.Round, err error) {
au.dbs = l.trackerDB()
au.log = l.trackerLog()
au.ledger = l
if au.initAccounts == nil {
err = fmt.Errorf("accountUpdates.initializeFromDisk: initAccounts not set")
return
}
lastestBlockRound = l.Latest()
err = au.dbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
var err0 error
au.dbRound, err0 = au.accountsInitialize(ctx, tx)
if err0 != nil {
return err0
}
// Check for the blocks DB and tracker DB being out of sync
if au.dbRound > lastestBlockRound {
au.log.Warnf("accountUpdates.initializeFromDisk: resetting accounts DB (on round %v, but blocks DB's latest is %v)", au.dbRound, lastestBlockRound)
err0 = accountsReset(tx)
if err0 != nil {
return err0
}
au.dbRound, err0 = au.accountsInitialize(ctx, tx)
if err0 != nil {
return err0
}
}
totals, err0 := accountsTotals(tx, false)
if err0 != nil {
return err0
}
au.roundTotals = []AccountTotals{totals}
return nil
})
if err != nil {
return
}
// the VacuumDatabase would be a no-op if au.vacuumOnStartup is cleared.
au.vacuumDatabase(context.Background())
if err != nil {
return
}
au.accountsq, err = accountsDbInit(au.dbs.rdb.Handle, au.dbs.wdb.Handle)
au.lastCatchpointLabel, _, err = au.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
if err != nil {
return
}
hdr, err := l.BlockHdr(au.dbRound)
if err != nil {
return
}
au.protos = []config.ConsensusParams{config.Consensus[hdr.CurrentProtocol]}
au.deltas = nil
au.creatableDeltas = nil
au.accounts = make(map[basics.Address]modifiedAccount)
au.creatables = make(map[basics.CreatableIndex]modifiedCreatable)
au.deltasAccum = []int{0}
// keep these channels closed if we're not generating a catchpoint
au.catchpointWriting = make(chan struct{}, 1)
au.catchpointSlowWriting = make(chan struct{}, 1)
close(au.catchpointSlowWriting)
close(au.catchpointWriting)
au.ctx, au.ctxCancel = context.WithCancel(context.Background())
au.committedOffset = make(chan deferedCommit, 1)
au.commitSyncerClosed = make(chan struct{})
go au.commitSyncer(au.committedOffset)
lastBalancesRound = au.dbRound
return
}
// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
func accountHashBuilder(addr basics.Address, accountData basics.AccountData, encodedAccountData []byte) []byte {
hash := make([]byte, 4+crypto.DigestSize)
// write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
// recent updates to be in-cache, and "older" nodes will be left alone.
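// Illustrative example (not part of the original comment): with
// accountData.RewardsBase = 0x0A0B0C0D, the loop below produces hash[0..3] =
// {0x0A, 0x0B, 0x0C, 0x0D} (the low 32 bits in big-endian order), while hash[4:]
// receives the digest of append(addr, encodedAccountData...).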
for i, rewards := 3, accountData.RewardsBase; i >= 0; i, rewards = i-1, rewards>>8 {
// the following takes the rewards & 255 -> hash[i]
hash[i] = byte(rewards)
}
entryHash := crypto.Hash(append(addr[:], encodedAccountData[:]...))
copy(hash[4:], entryHash[:])
return hash[:]
}
// accountsInitialize initializes the accounts DB if needed and returns the current account round.
// As part of the initialization, it tests the current database schema version, and performs upgrade
// procedures to bring it up to the database schema supported by the binary.
func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (basics.Round, error) {
// check current database version.
dbVersion, err := db.GetUserVersion(ctx, tx)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to read database schema version : %v", err)
}
// if database version is greater than supported by current binary, write a warning. This would keep the existing
// fallback behaviour where we could use an older binary iff the schema happens to be backward compatible.
if dbVersion > accountDBVersion {
au.log.Warnf("accountsInitialize database schema version is %d, but algod supports only %d", dbVersion, accountDBVersion)
}
if dbVersion < accountDBVersion {
au.log.Infof("accountsInitialize upgrading database schema from version %d to version %d", dbVersion, accountDBVersion)
for dbVersion < accountDBVersion {
au.log.Infof("accountsInitialize performing upgrade from version %d", dbVersion)
// perform the initialization/upgrade
switch dbVersion {
case 0:
dbVersion, err = au.upgradeDatabaseSchema0(ctx, tx)
if err != nil {
au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 0 : %v", err)
return 0, err
}
case 1:
dbVersion, err = au.upgradeDatabaseSchema1(ctx, tx)
if err != nil {
au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
return 0, err
}
case 2:
dbVersion, err = au.upgradeDatabaseSchema2(ctx, tx)
if err != nil {
au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err)
return 0, err
}
case 3:
dbVersion, err = au.upgradeDatabaseSchema3(ctx, tx)
if err != nil {
au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err)
return 0, err
}
default:
return 0, fmt.Errorf("accountsInitialize unable to upgrade database from schema version %d", dbVersion)
}
}
au.log.Infof("accountsInitialize database schema upgrade complete")
}
rnd, hashRound, err := accountsRound(tx)
if err != nil {
return 0, err
}
if hashRound != rnd {
// if the hashed round is different from the base round, something was modified, and the accounts aren't in sync
// with the hashes.
err = resetAccountHashes(tx)
if err != nil {
return 0, err
}
// if catchpoint is disabled on this node, we could complete the initialization right here.
if au.catchpointInterval == 0 {
return rnd, nil
}
}
// create the merkle trie for the balances
committer, err := makeMerkleCommitter(tx, false)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
}
trie, err := merkletrie.MakeTrie(committer, trieCachedNodesCount)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
}
// we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie.
// we can figure this out by examining the hash of the root:
rootHash, err := trie.RootHash()
if err != nil {
return rnd, fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
}
if rootHash.IsZero() {
au.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
var accountsIterator encodedAccountsBatchIter
defer accountsIterator.Close()
startTrieBuildTime := time.Now()
accountsCount := 0
lastRebuildTime := startTrieBuildTime
pendingAccounts := 0
for {
bal, err := accountsIterator.Next(ctx, tx, trieRebuildAccountChunkSize)
if err != nil {
return rnd, err
}
if len(bal) == 0 {
break
}
accountsCount += len(bal)
pendingAccounts += len(bal)
for _, balance := range bal {
var accountData basics.AccountData
err = protocol.Decode(balance.AccountData, &accountData)
if err != nil {
return rnd, err
}
hash := accountHashBuilder(balance.Address, accountData, balance.AccountData)
added, err := trie.Add(hash)
if err != nil {
return rnd, fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
}
if !added {
au.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(hash), balance.Address)
}
}
if pendingAccounts >= trieRebuildCommitFrequency {
// this trie Evict will commit using the current transaction.
// if anything goes wrong, it will still get rolled back.
_, err = trie.Evict(true)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
}
pendingAccounts = 0
}
if len(bal) < trieRebuildAccountChunkSize {
break
}
if time.Now().Sub(lastRebuildTime) > 5*time.Second {
// let the user know that the trie is still being rebuilt.
au.log.Infof("accountsInitialize still building the trie, and processed so far %d accounts", accountsCount)
lastRebuildTime = time.Now()
}
}
// this trie Evict will commit using the current transaction.
// if anything goes wrong, it will still get rolled back.
_, err = trie.Evict(true)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
}
// we've just updated the merkle trie, update the hashRound to reflect that.
err = updateAccountsRound(tx, rnd, rnd)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to update the account round to %d: %v", rnd, err)
}
au.log.Infof("accountsInitialize rebuilt the merkle trie with %d entries in %v", accountsCount, time.Now().Sub(startTrieBuildTime))
}
au.balancesTrie = trie
return rnd, nil
}
// upgradeDatabaseSchema0 upgrades the database schema from version 0 to version 1
//
// Schema of version 0 is expected to be aligned with the schema used on version 2.0.8 or before.
// Any database of version 2.0.8 would be of version 0. At this point, the database might
// have the following tables : ( i.e. a newly created database would not have these )
// * acctrounds
// * accounttotals
// * accountbase
// * assetcreators
// * storedcatchpoints
// * accounthashes
// * catchpointstate
//
// As the first step of the upgrade, the above tables are being created if they do not already exist.
// Following that, the assetcreators table is being altered by adding a new column to it (ctype).
// Last, in case the database was just created, it would get initialized with the following:
// The accountbase would get initialized with the au.initAccounts
// The accounttotals would get initialized to align with the initialization account added to accountbase
// The acctrounds would get updated to indicate that the balance matches round 0
//
func (au *accountUpdates) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) {
au.log.Infof("accountsInitialize initializing schema")
err = accountsInit(tx, au.initAccounts, au.initProto)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to initialize schema : %v", err)
}
_, err = db.SetUserVersion(ctx, tx, 1)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 0 to 1: %v", err)
}
return 1, nil
}
// upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2
//
// The schema update to version 2 is intended to ensure that the encoding of all the accounts data is
// both canonical and identical across the entire network. On release 2.0.5 we released an upgrade to the messagepack.
// The upgraded messagepack was decoding the account data correctly, but would have a different
// encoding compared to its predecessor. As a result, some of the account data that was previously stored
// would have a different encoded representation than the one on disk.
// To address this, this startup procedure would attempt to scan all the accounts data. For each account data, we would
// see if its encoding aligns with the current messagepack encoder. If it doesn't, we would update its encoding.
// Then, depending on whether we found any such account data, we would reset the merkle trie and stored catchpoints.
// Once the upgrade is complete, the accountsInitialize would (if needed) rebuild the merkle trie using the new
// encoded accounts.
//
// This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performs
// a functional update to its content.
//
func (au *accountUpdates) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) {
// update accounts encoding.
au.log.Infof("accountsInitialize verifying accounts data encoding")
modifiedAccounts, err := reencodeAccounts(ctx, tx)
if err != nil {
return 0, err
}
if modifiedAccounts > 0 {
au.log.Infof("accountsInitialize reencoded %d accounts", modifiedAccounts)
au.log.Infof("accountsInitialize resetting account hashes")
// reset the merkle trie
err = resetAccountHashes(tx)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to reset account hashes : %v", err)
}
au.log.Infof("accountsInitialize preparing queries")
// initialize a new accountsq with the incoming transaction.
accountsq, err := accountsDbInit(tx, tx)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to prepare queries : %v", err)
}
// close the prepared statements when we're done with them.
defer accountsq.close()
au.log.Infof("accountsInitialize resetting prior catchpoints")
// delete the last catchpoint label if we have any.
_, err = accountsq.writeCatchpointStateString(ctx, catchpointStateLastCatchpoint, "")
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to clear prior catchpoint : %v", err)
}
au.log.Infof("accountsInitialize deleting stored catchpoints")
// delete catchpoints.
err = au.deleteStoredCatchpoints(ctx, accountsq)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to delete stored catchpoints : %v", err)
}
} else {
au.log.Infof("accountsInitialize found that no accounts needed to be reencoded")
}
// update version
_, err = db.SetUserVersion(ctx, tx, 2)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 1 to 2: %v", err)
}
return 2, nil
}
// upgradeDatabaseSchema2 upgrades the database schema from version 2 to version 3
//
// This upgrade only enables the database vacuuming which will take place once the upgrade process is complete.
// If the user has already specified the OptimizeAccountsDatabaseOnStartup flag in the configuration file, this
// step becomes a no-op.
//
func (au *accountUpdates) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) {
au.vacuumOnStartup = true
// update version
_, err = db.SetUserVersion(ctx, tx, 3)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 2 to 3: %v", err)
}
return 3, nil
}
// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4,
// adding the normalizedonlinebalance column to the accountbase table.
func (au *accountUpdates) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) {
err = accountsAddNormalizedBalance(tx, au.ledger.GenesisProto())
if err != nil {
return 0, err
}
// update version
_, err = db.SetUserVersion(ctx, tx, 4)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 3 to 4: %v", err)
}
return 4, nil
}
// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
// once all the files have been deleted, it would go ahead and remove the entries from the table.
func (au *accountUpdates) deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries) (err error) {
catchpointsFilesChunkSize := 50
for {
fileNames, err := dbQueries.getOldestCatchpointFiles(ctx, catchpointsFilesChunkSize, 0)
if err != nil {
return err
}
if len(fileNames) == 0 {
break
}
for round, fileName := range fileNames {
absCatchpointFileName := filepath.Join(au.dbDirectory, fileName)
err = os.Remove(absCatchpointFileName)
if err == nil || os.IsNotExist(err) {
// it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
err = nil
} else {
// we can't delete the file, abort -
return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
}
// clear the entry from the database
err = dbQueries.storeCatchpoint(ctx, round, "", "", 0)
if err != nil {
return err
}
}
}
return nil
}
// accountsUpdateBalances applies the given deltas array to the merkle trie
func (au *accountUpdates) accountsUpdateBalances(accountsDeltasRound []map[basics.Address]accountDelta, offset uint64) (err error) {
if au.catchpointInterval == 0 {
return nil
}
var added, deleted bool
accumulatedChanges := 0
for i := uint64(0); i < offset; i++ {
accountsDeltas := accountsDeltasRound[i]
for addr, delta := range accountsDeltas {
if !delta.old.IsZero() {
deleteHash := accountHashBuilder(addr, delta.old, protocol.Encode(&delta.old))
deleted, err = au.balancesTrie.Delete(deleteHash)
if err != nil {
return err
}
if !deleted {
au.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
} else {
accumulatedChanges++
}
}
if !delta.new.IsZero() {
addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new))
added, err = au.balancesTrie.Add(addHash)
if err != nil {
return err
}
if !added {
au.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
} else {
accumulatedChanges++
}
}
}
if accumulatedChanges >= trieAccumulatedChangesFlush {
accumulatedChanges = 0
err = au.balancesTrie.Commit()
if err != nil {
return
}
}
}
// write it all to disk.
if accumulatedChanges > 0 {
err = au.balancesTrie.Commit()
}
return
}
// newBlockImpl is the accountUpdates implementation of the ledgerTracker interface. This is the "internal" facing function
// which assumes that no lock needs to be taken.
func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta StateDelta) {
proto := config.Consensus[blk.CurrentProtocol]
rnd := blk.Round()
if rnd <= au.latest() {
// Duplicate, ignore.
return
}
if rnd != au.latest()+1 {
au.log.Panicf("accountUpdates: newBlock %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
}
au.deltas = append(au.deltas, delta.accts)
au.protos = append(au.protos, proto)
au.creatableDeltas = append(au.creatableDeltas, delta.creatables)
au.roundDigest = append(au.roundDigest, blk.Digest())
au.deltasAccum = append(au.deltasAccum, len(delta.accts)+au.deltasAccum[len(au.deltasAccum)-1])
var ot basics.OverflowTracker
newTotals := au.roundTotals[len(au.roundTotals)-1]
allBefore := newTotals.All()
newTotals.applyRewards(delta.hdr.RewardsLevel, &ot)
for addr, data := range delta.accts {
newTotals.delAccount(proto, data.old, &ot)
newTotals.addAccount(proto, data.new, &ot)
macct := au.accounts[addr]
macct.ndeltas++
macct.data = data.new
au.accounts[addr] = macct
}
for cidx, cdelta := range delta.creatables {
mcreat := au.creatables[cidx]
mcreat.creator = cdelta.creator
mcreat.created = cdelta.created
mcreat.ctype = cdelta.ctype
mcreat.ndeltas++
au.creatables[cidx] = mcreat
}
if ot.Overflowed {
au.log.Panicf("accountUpdates: newBlock %d overflowed totals", rnd)
}
allAfter := newTotals.All()
if allBefore != allAfter {
au.log.Panicf("accountUpdates: sum of money changed from %d to %d", allBefore.Raw, allAfter.Raw)
}
au.roundTotals = append(au.roundTotals, newTotals)
}
// lookupImpl returns the account data for a given address at a given round. The withRewards indicates whether the
// rewards should be added to the AccountData before returning. Note that the function doesn't update the account with the rewards,
// even while it could return the AccountData which represents the "rewarded" account data.
func (au *accountUpdates) lookupImpl(rnd basics.Round, addr basics.Address, withRewards bool) (data basics.AccountData, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return
}
offsetForRewards := offset
defer func() {
if withRewards {
totals := au.roundTotals[offsetForRewards]
proto := au.protos[offsetForRewards]
data = data.WithUpdatedRewards(proto, totals.RewardsLevel)
}
}()
// Check if this is the most recent round, in which case, we can
// use a cache of the most recent account state.
if offset == uint64(len(au.deltas)) {
macct, ok := au.accounts[addr]
if ok {
return macct.data, nil
}
} else {
// Check if the account has been updated recently. Traverse the deltas
// backwards to ensure that later updates take priority if present.
for offset > 0 {
offset--
d, ok := au.deltas[offset][addr]
if ok {
return d.new, nil
}
}
}
// No updates of this account in the in-memory deltas; use on-disk DB.
// The check in roundOffset() made sure the round is exactly the one
// present in the on-disk DB. As an optimization, we avoid creating
// a separate transaction here, and directly use a prepared SQL query
// against the database.
return au.accountsq.lookup(addr)
}
// getCreatorForRoundImpl returns the asset/app creator for a given asset/app index at a given round
func (au *accountUpdates) getCreatorForRoundImpl(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return basics.Address{}, false, err
}
// If this is the most recent round, au.creatables will have the latest
// state and we can skip scanning backwards over creatableDeltas
if offset == uint64(len(au.deltas)) {
// Check if we already have the asset/creator in cache
creatableDelta, ok := au.creatables[cidx]
if ok {
if creatableDelta.created && creatableDelta.ctype == ctype {
return creatableDelta.creator, true, nil
}
return basics.Address{}, false, nil
}
} else {
for offset > 0 {
offset--
creatableDelta, ok := au.creatableDeltas[offset][cidx]
if ok {
if creatableDelta.created && creatableDelta.ctype == ctype {
return creatableDelta.creator, true, nil
}
return basics.Address{}, false, nil
}
}
}
// Check the database
return au.accountsq.lookupCreator(cidx, ctype)
}
// accountsCreateCatchpointLabel creates a catchpoint label and writes it.
func (au *accountUpdates) accountsCreateCatchpointLabel(committedRound basics.Round, totals AccountTotals, ledgerBlockDigest crypto.Digest, trieBalancesHash crypto.Digest) (label string, err error) {
cpLabel := makeCatchpointLabel(committedRound, ledgerBlockDigest, trieBalancesHash, totals)
label = cpLabel.String()
_, err = au.accountsq.writeCatchpointStateString(context.Background(), catchpointStateLastCatchpoint, label)
return
}
// roundOffset calculates the offset of the given round compared to the current dbRound. Requires that the lock is held by the caller.
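// Illustrative example (assumed values): with au.dbRound = 1000 and len(au.deltas) = 320,
// rounds 1000 through 1320 are resolvable; roundOffset(1005) returns offset 5, while any
// round below 1000 or above 1320 yields an error.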
func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err error) {
if rnd < au.dbRound {
err = fmt.Errorf("round %d before dbRound %d", rnd, au.dbRound)
return
}
off := uint64(rnd - au.dbRound)
if off > uint64(len(au.deltas)) {
err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
return
}
return off, nil
}
// commitSyncer is the syncer go-routine function which performs the database updates. Internally, it dequeues deferedCommits and
// sends the tasks to commitRound for completing the operation.
func (au *accountUpdates) commitSyncer(deferedCommits chan deferedCommit) {
defer close(au.commitSyncerClosed)
for {
select {
case committedOffset, ok := <-deferedCommits:
if !ok {
return
}
au.commitRound(committedOffset.offset, committedOffset.dbRound, committedOffset.lookback)
case <-au.ctx.Done():
// drain the pending commits queue:
drained := false
for !drained {
select {
case <-deferedCommits:
au.accountsWriting.Done()
default:
drained = true
}
}
return
}
}
}
// commitRound writes to the database a "chunk" of rounds, and updates the dbRound accordingly.
func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookback basics.Round) {
defer au.accountsWriting.Done()
au.accountsMu.RLock()
// we can exit right away, as this is the result of a mis-ordered call to committedUpTo.
if au.dbRound < dbRound || offset < uint64(au.dbRound-dbRound) {
// if this is an archival ledger, we might need to close the catchpointWriting channel
if au.archivalLedger {
// determine if this was a catchpoint round
isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
if isCatchpointRound {
// it was a catchpoint round, so close the channel.
close(au.catchpointWriting)
}
}
au.accountsMu.RUnlock()
return
}
// adjust the offset according to what happened meanwhile.
offset -= uint64(au.dbRound - dbRound)
dbRound = au.dbRound
newBase := basics.Round(offset) + dbRound
flushTime := time.Now()
isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
// create a copy of the deltas, round totals and protos for the range we're going to flush.
deltas := make([]map[basics.Address]accountDelta, offset, offset)
creatableDeltas := make([]map[basics.CreatableIndex]modifiedCreatable, offset, offset)
roundTotals := make([]AccountTotals, offset+1, offset+1)
protos := make([]config.ConsensusParams, offset+1, offset+1)
copy(deltas, au.deltas[:offset])
copy(creatableDeltas, au.creatableDeltas[:offset])
copy(roundTotals, au.roundTotals[:offset+1])
copy(protos, au.protos[:offset+1])
// Keep track of how many changes to each account we flush to the
// account DB, so that we can drop the corresponding refcounts in
// au.accounts.
flushcount := make(map[basics.Address]int)
creatableFlushcount := make(map[basics.CreatableIndex]int)
var committedRoundDigest crypto.Digest
if isCatchpointRound {
committedRoundDigest = au.roundDigest[offset+uint64(lookback)-1]
}
au.accountsMu.RUnlock()
// in committedUpTo, we expect that this function will close the catchpointWriting channel when
// it's a catchpoint round and it's an archival ledger. Doing this in a deferred function
// here prevents us from "forgetting" to close that channel later on.
defer func() {
if isCatchpointRound && au.archivalLedger {
close(au.catchpointWriting)
}
}()
for i := uint64(0); i < offset; i++ {
for addr := range deltas[i] {
flushcount[addr] = flushcount[addr] + 1
}
for cidx := range creatableDeltas[i] {
creatableFlushcount[cidx] = creatableFlushcount[cidx] + 1
}
}
var catchpointLabel string
beforeUpdatingBalancesTime := time.Now()
var trieBalancesHash crypto.Digest
genesisProto := au.ledger.GenesisProto()
err := au.dbs.wdb.AtomicCommitWriteLock(func(ctx context.Context, tx *sql.Tx) (err error) {
treeTargetRound := basics.Round(0)
if au.catchpointInterval > 0 {
mc, err0 := makeMerkleCommitter(tx, false)
if err0 != nil {
return err0
}
if au.balancesTrie == nil {
trie, err := merkletrie.MakeTrie(mc, trieCachedNodesCount)
if err != nil {
au.log.Warnf("unable to create merkle trie during committedUpTo: %v", err)
return err
}
au.balancesTrie = trie
} else {
au.balancesTrie.SetCommitter(mc)
}
treeTargetRound = dbRound + basics.Round(offset)
}
for i := uint64(0); i < offset; i++ {
err = accountsNewRound(tx, deltas[i], creatableDeltas[i], genesisProto)
if err != nil {
return err
}
}
err = totalsNewRounds(tx, deltas[:offset], roundTotals[1:offset+1], protos[1:offset+1])
if err != nil {
return err
}
err = au.accountsUpdateBalances(deltas, offset)
if err != nil {
return err
}
err = updateAccountsRound(tx, dbRound+basics.Round(offset), treeTargetRound)
if err != nil {
return err
}
if isCatchpointRound {
trieBalancesHash, err = au.balancesTrie.RootHash()
if err != nil {
return
}
}
return nil
}, &au.accountsMu)
if err != nil {
au.balancesTrie = nil
au.log.Warnf("unable to advance account snapshot: %v", err)
return
}
if isCatchpointRound {
catchpointLabel, err = au.accountsCreateCatchpointLabel(dbRound+basics.Round(offset)+lookback, roundTotals[offset], committedRoundDigest, trieBalancesHash)
if err != nil {
au.log.Warnf("commitRound : unable to create a catchpoint label: %v", err)
}
}
if au.balancesTrie != nil {
_, err = au.balancesTrie.Evict(false)
if err != nil {
au.log.Warnf("merkle trie failed to evict: %v", err)
}
}
if isCatchpointRound && catchpointLabel != "" {
au.lastCatchpointLabel = catchpointLabel
}
updatingBalancesDuration := time.Now().Sub(beforeUpdatingBalancesTime)
// Drop reference counts to modified accounts, and evict them
// from in-memory cache when no references remain.
for addr, cnt := range flushcount {
macct, ok := au.accounts[addr]
if !ok {
au.log.Panicf("inconsistency: flushed %d changes to %s, but not in au.accounts", cnt, addr)
}
if cnt > macct.ndeltas {
au.log.Panicf("inconsistency: flushed %d changes to %s, but au.accounts had %d", cnt, addr, macct.ndeltas)
}
macct.ndeltas -= cnt
if macct.ndeltas == 0 {
delete(au.accounts, addr)
} else {
au.accounts[addr] = macct
}
}
for cidx, cnt := range creatableFlushcount {
mcreat, ok := au.creatables[cidx]
if !ok {
au.log.Panicf("inconsistency: flushed %d changes to creatable %d, but not in au.creatables", cnt, cidx)
}
if cnt > mcreat.ndeltas {
au.log.Panicf("inconsistency: flushed %d changes to creatable %d, but au.creatables had %d", cnt, cidx, mcreat.ndeltas)
}
mcreat.ndeltas -= cnt
if mcreat.ndeltas == 0 {
delete(au.creatables, cidx)
} else {
au.creatables[cidx] = mcreat
}
}
au.deltas = au.deltas[offset:]
au.deltasAccum = au.deltasAccum[offset:]
au.roundDigest = au.roundDigest[offset:]
au.protos = au.protos[offset:]
au.roundTotals = au.roundTotals[offset:]
au.creatableDeltas = au.creatableDeltas[offset:]
au.dbRound = newBase
au.lastFlushTime = flushTime
au.accountsMu.Unlock()
if isCatchpointRound && au.archivalLedger && catchpointLabel != "" {
// generate the catchpoint file. This needs to be done inline so that it will block any new accounts from being written.
// generateCatchpoint expects that the accounts data would not be modified in the background during its execution.
au.generateCatchpoint(basics.Round(offset)+dbRound+lookback, catchpointLabel, committedRoundDigest, updatingBalancesDuration)
}
}
// latest returns the latest round
func (au *accountUpdates) latest() basics.Round {
return au.dbRound + basics.Round(len(au.deltas))
}
// generateCatchpoint generates a single catchpoint file
func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label string, committedRoundDigest crypto.Digest, updatingBalancesDuration time.Duration) {
beforeGeneratingCatchpointTime := time.Now()
catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
}
// the retryCatchpointCreation is used to repeat the catchpoint file generation in case the node crashed / aborted during startup
// before the catchpoint file generation could be completed.
retryCatchpointCreation := false
au.log.Debugf("accountUpdates: generateCatchpoint: generating catchpoint for round %d", committedRound)
defer func() {
if !retryCatchpointCreation {
// clear the writingCatchpoint flag
_, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(0))
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint unable to clear catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
}
}
}()
_, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(committedRound))
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint unable to write catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
return
}
relCatchpointFileName := filepath.Join("catchpoints", catchpointRoundToPath(committedRound))
absCatchpointFileName := filepath.Join(au.dbDirectory, relCatchpointFileName)
more := true
const shortChunkExecutionDuration = 50 * time.Millisecond
const longChunkExecutionDuration = 1 * time.Second
var chunkExecutionDuration time.Duration
select {
case <-au.catchpointSlowWriting:
chunkExecutionDuration = longChunkExecutionDuration
default:
chunkExecutionDuration = shortChunkExecutionDuration
}
var catchpointWriter *catchpointWriter
err = au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
catchpointWriter = makeCatchpointWriter(au.ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label)
for more {
stepCtx, stepCancelFunction := context.WithTimeout(au.ctx, chunkExecutionDuration)
writeStepStartTime := time.Now()
more, err = catchpointWriter.WriteStep(stepCtx)
// accumulate the actual time we've spent writing in this step.
catchpointGenerationStats.CPUTime += uint64(time.Now().Sub(writeStepStartTime).Nanoseconds())
stepCancelFunction()
if more && err == nil {
// we just wrote some data, but there is more to be written.
// go to sleep for a while.
// before going to sleep, extend the transaction timeout so that we won't get warnings:
db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(1*time.Second))
select {
case <-time.After(100 * time.Millisecond):
// increase the time slot allocated for writing the catchpoint, but stop when we get to the longChunkExecutionDuration limit.
// this would allow the catchpoint writing speed to ramp up while still leaving some cpu available.
chunkExecutionDuration *= 2
if chunkExecutionDuration > longChunkExecutionDuration {
chunkExecutionDuration = longChunkExecutionDuration
}
case <-au.ctx.Done():
retryCatchpointCreation = true
err2 := catchpointWriter.Abort()
if err2 != nil {
return fmt.Errorf("error removing catchpoint file : %v", err2)
}
return nil
case <-au.catchpointSlowWriting:
chunkExecutionDuration = longChunkExecutionDuration
}
}
if err != nil {
err = fmt.Errorf("unable to create catchpoint : %v", err)
err2 := catchpointWriter.Abort()
if err2 != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
}
return
}
}
return
})
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: %v", err)
return
}
if catchpointWriter == nil {
au.log.Warnf("accountUpdates: generateCatchpoint: nil catchpointWriter")
return
}
err = au.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: unable to save catchpoint: %v", err)
return
}
catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
catchpointGenerationStats.WritingDuration = uint64(time.Now().Sub(beforeGeneratingCatchpointTime).Nanoseconds())
catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
catchpointGenerationStats.CatchpointLabel = catchpointWriter.GetCatchpoint()
au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
au.log.With("writingDuration", catchpointGenerationStats.WritingDuration).
With("CPUTime", catchpointGenerationStats.CPUTime).
With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
With("accountsCount", catchpointGenerationStats.AccountsCount).
With("fileSize", catchpointGenerationStats.FileSize).
With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
Infof("Catchpoint file was generated")
}
// catchpointRoundToPath calculates the catchpoint file path for a given round
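// Illustrative example: for round 70000, irnd = 70000/256 = 273, producing the directory
// components "11" (273%256 = 0x11) and "01" (273/256 = 1), so the resulting relative path
// is "11/01/70000.catchpoint".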
func catchpointRoundToPath(rnd basics.Round) string {
irnd := int64(rnd) / 256
outStr := ""
for irnd > 0 {
outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
irnd = irnd / 256
}
outStr = filepath.Join(outStr, strconv.FormatInt(int64(rnd), 10)+".catchpoint")
return outStr
}
// saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
// After a successful insert operation to the database, it would delete up to 2 old entries, as needed.
// Deleting 2 entries while inserting a single entry allows us to adjust the size of the backing storage and have the
// database and storage realign.
func (au *accountUpdates) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) {
if au.catchpointFileHistoryLength != 0 {
err = au.accountsq.storeCatchpoint(context.Background(), round, fileName, catchpoint, fileSize)
if err != nil {
au.log.Warnf("accountUpdates: saveCatchpoint: unable to save catchpoint: %v", err)
return
}
} else {
err = os.Remove(fileName)
if err != nil {
au.log.Warnf("accountUpdates: saveCatchpoint: unable to remove file (%s): %v", fileName, err)
return
}
}
if au.catchpointFileHistoryLength == -1 {
return
}
var filesToDelete map[basics.Round]string
filesToDelete, err = au.accountsq.getOldestCatchpointFiles(context.Background(), 2, au.catchpointFileHistoryLength)
if err != nil {
return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err)
}
for round, fileToDelete := range filesToDelete {
absCatchpointFileName := filepath.Join(au.dbDirectory, fileToDelete)
err = os.Remove(absCatchpointFileName)
if err == nil || os.IsNotExist(err) {
// it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
err = nil
} else {
// we can't delete the file, abort -
return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
}
err = au.accountsq.storeCatchpoint(context.Background(), round, "", "", 0)
if err != nil {
return fmt.Errorf("unable to delete old catchpoint entry '%s' : %v", fileToDelete, err)
}
}
return
}
// vacuumDatabase performs a full vacuum of the accounts database.
func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
if !au.vacuumOnStartup {
return
}
startTime := time.Now()
vacuumExitCh := make(chan struct{}, 1)
vacuumLoggingAbort := sync.WaitGroup{}
vacuumLoggingAbort.Add(1)
// vacuuming the database can take a while. A long while. We want to have a logging function running in a separate go-routine that would log the progress to the log file.
// also, when we're done vacuuming, we should send an event notifying of the total time it took to vacuum the database.
go func() {
defer vacuumLoggingAbort.Done()
au.log.Infof("Vacuuming accounts database started")
for {
select {
case <-time.After(5 * time.Second):
au.log.Infof("Vacuuming accounts database in progress")
case <-vacuumExitCh:
return
}
}
}()
vacuumStats, err := au.dbs.wdb.Vacuum(ctx)
close(vacuumExitCh)
vacuumLoggingAbort.Wait()
if err != nil {
au.log.Warnf("Vacuuming account database failed : %v", err)
return err
}
vacuumElapsedTime := time.Now().Sub(startTime)
au.log.Infof("Vacuuming accounts database completed within %v, reducing number of pages from %d to %d and size from %d to %d", vacuumElapsedTime, vacuumStats.PagesBefore, vacuumStats.PagesAfter, vacuumStats.SizeBefore, vacuumStats.SizeAfter)
vacuumTelemetryStats := telemetryspec.BalancesAccountVacuumEventDetails{
VacuumTimeNanoseconds: vacuumElapsedTime.Nanoseconds(),
BeforeVacuumPageCount: vacuumStats.PagesBefore,
AfterVacuumPageCount: vacuumStats.PagesAfter,
BeforeVacuumSpaceBytes: vacuumStats.SizeBefore,
AfterVacuumSpaceBytes: vacuumStats.SizeAfter,
}
au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.BalancesAccountVacuumEvent, vacuumTelemetryStats)
return
}
| 1 | 40,185 | rename Round -> requestedRound and DbRound -> dbRound. we don't need to export the fields here, only the error struct. | algorand-go-algorand | go |
@@ -1052,6 +1052,18 @@ func TestClusterDeploymentReconcile(t *testing.T) {
}
},
},
+ {
+ name: "Add cluster region label",
+ existing: []runtime.Object{
+ testClusterDeploymentWithoutRegionLabel(),
+ },
+ validate: func(c client.Client, t *testing.T) {
+ cd := getCD(c)
+ if assert.NotNil(t, cd, "missing clusterdeployment") {
+ assert.Equal(t, getClusterRegion(cd), cd.Labels[hivev1.HiveClusterRegionLabel], "incorrect cluster region label")
+ }
+ },
+ },
{
name: "Ensure cluster metadata set from provision",
existing: []runtime.Object{ | 1 | package clusterdeployment
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/golang/mock/gomock"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
openshiftapiv1 "github.com/openshift/api/config/v1"
routev1 "github.com/openshift/api/route/v1"
"github.com/openshift/hive/pkg/apis"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
hivev1aws "github.com/openshift/hive/pkg/apis/hive/v1/aws"
"github.com/openshift/hive/pkg/apis/hive/v1/baremetal"
"github.com/openshift/hive/pkg/constants"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/remoteclient"
remoteclientmock "github.com/openshift/hive/pkg/remoteclient/mock"
)
const (
testName = "foo-lqmsh"
testClusterName = "bar"
testClusterID = "testFooClusterUUID"
testInfraID = "testFooInfraID"
provisionName = "foo-lqmsh-random"
imageSetJobName = "foo-lqmsh-imageset"
testNamespace = "default"
testSyncsetInstanceName = "testSSI"
metadataName = "foo-lqmsh-metadata"
pullSecretSecret = "pull-secret"
globalPullSecret = "global-pull-secret"
adminKubeconfigSecret = "foo-lqmsh-admin-kubeconfig"
adminKubeconfig = `clusters:
- cluster:
certificate-authority-data: JUNK
server: https://bar-api.clusters.example.com:6443
name: bar
`
adminPasswordSecret = "foo-lqmsh-admin-password"
remoteClusterRouteObjectName = "console"
remoteClusterRouteObjectNamespace = "openshift-console"
testClusterImageSetName = "test-image-set"
)
func init() {
log.SetLevel(log.DebugLevel)
}
func TestClusterDeploymentReconcile(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
openshiftapiv1.Install(scheme.Scheme)
routev1.Install(scheme.Scheme)
// Utility function to get the test CD from the fake client
getCD := func(c client.Client) *hivev1.ClusterDeployment {
cd := &hivev1.ClusterDeployment{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName, Namespace: testNamespace}, cd)
if err == nil {
return cd
}
return nil
}
getDNSZone := func(c client.Client) *hivev1.DNSZone {
zone := &hivev1.DNSZone{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName + "-zone", Namespace: testNamespace}, zone)
if err == nil {
return zone
}
return nil
}
getDeprovision := func(c client.Client) *hivev1.ClusterDeprovision {
req := &hivev1.ClusterDeprovision{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName, Namespace: testNamespace}, req)
if err == nil {
return req
}
return nil
}
getImageSetJob := func(c client.Client) *batchv1.Job {
return getJob(c, imageSetJobName)
}
tests := []struct {
name string
existing []runtime.Object
pendingCreation bool
expectErr bool
expectedRequeueAfter time.Duration
expectPendingCreation bool
expectConsoleRouteFetch bool
validate func(client.Client, *testing.T)
}{
{
name: "Add finalizer",
existing: []runtime.Object{
testClusterDeploymentWithoutFinalizer(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if cd == nil || !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
t.Errorf("did not get expected clusterdeployment finalizer")
}
},
},
{
name: "Create provision",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Len(t, provisions, 1, "expected provision to exist")
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testInstallLogPVC().Name, Namespace: testNamespace}, pvc)
assert.NoError(t, err)
assert.Equal(t, testClusterDeployment().Name, pvc.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.PVCTypeInstallLogs, pvc.Labels[constants.PVCTypeLabel], "incorrect pvc type label")
},
},
{
name: "Provision not created when pending create",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
pendingCreation: true,
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Empty(t, provisions, "expected provision to not exist")
},
},
{
name: "Adopt provision",
existing: []runtime.Object{
testClusterDeployment(),
testProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "no clusterdeployment found") {
if assert.NotNil(t, cd.Status.ProvisionRef, "missing provision ref") {
assert.Equal(t, provisionName, cd.Status.ProvisionRef.Name, "unexpected provision ref name")
}
}
},
},
{
name: "No-op Running provision",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "no clusterdeployment found") {
if e, a := testClusterDeploymentWithProvision(), cd; !assert.True(t, apiequality.Semantic.DeepEqual(e, a), "unexpected change in clusterdeployment") {
t.Logf("diff = %s", diff.ObjectReflectDiff(e, a))
}
}
provisions := getProvisions(c)
if assert.Len(t, provisions, 1, "expected provision to exist") {
if e, a := testProvision(), provisions[0]; !assert.True(t, apiequality.Semantic.DeepEqual(e, a), "unexpected change in provision") {
t.Logf("diff = %s", diff.ObjectReflectDiff(e, a))
}
}
},
},
{
name: "Parse server URL from admin kubeconfig",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assert.Equal(t, "https://bar-api.clusters.example.com:6443", cd.Status.APIURL)
assert.Equal(t, "https://bar-api.clusters.example.com:6443/console", cd.Status.WebConsoleURL)
},
},
{
name: "Parse server URL from admin kubeconfig for adopted cluster",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Installed = true
cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{
InfraID: "fakeinfra",
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
}
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assert.Equal(t, "https://bar-api.clusters.example.com:6443", cd.Status.APIURL)
assert.Equal(t, "https://bar-api.clusters.example.com:6443/console", cd.Status.WebConsoleURL)
},
},
{
name: "Add additional CAs to admin kubeconfig",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
// Ensure the admin kubeconfig secret got a copy of the raw data, indicating that we would have
// added additional CAs if any were configured.
akcSecret := &corev1.Secret{}
err := c.Get(context.TODO(), client.ObjectKey{Name: adminKubeconfigSecret, Namespace: testNamespace},
akcSecret)
require.NoError(t, err)
require.NotNil(t, akcSecret)
assert.Contains(t, akcSecret.Data, rawAdminKubeconfigKey)
},
},
{
name: "Add additional CAs to admin kubeconfig when status URLs set",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Installed = true
cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{
InfraID: "fakeinfra",
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
}
cd.Status.WebConsoleURL = "https://example.com"
cd.Status.APIURL = "https://example.com"
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: false,
validate: func(c client.Client, t *testing.T) {
// Ensure the admin kubeconfig secret got a copy of the raw data, indicating that we would have
// added additional CAs if any were configured.
akcSecret := &corev1.Secret{}
err := c.Get(context.TODO(), client.ObjectKey{Name: adminKubeconfigSecret, Namespace: testNamespace},
akcSecret)
require.NoError(t, err)
require.NotNil(t, akcSecret)
assert.Contains(t, akcSecret.Data, rawAdminKubeconfigKey)
},
},
{
name: "Completed provision",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testSuccessfulProvision(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.True(t, cd.Spec.Installed, "expected cluster to be installed")
}
},
},
{
name: "PVC cleanup for successful install",
existing: []runtime.Object{
testInstalledClusterDeployment(time.Now()),
testInstallLogPVC(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: GetInstallLogsPVCName(testClusterDeployment()), Namespace: testNamespace}, pvc)
if assert.Error(t, err) {
assert.True(t, errors.IsNotFound(err))
}
},
},
{
name: "PVC preserved for install with restarts",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testInstalledClusterDeployment(time.Now())
cd.Status.InstallRestarts = 5
return cd
}(),
testInstallLogPVC(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testInstallLogPVC().Name, Namespace: testNamespace}, pvc)
assert.NoError(t, err)
},
},
{
name: "PVC cleanup for install with restarts after 7 days",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testInstalledClusterDeployment(time.Now().Add(-8 * 24 * time.Hour))
cd.Status.InstallRestarts = 5
return cd
}(),
testInstallLogPVC(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: GetInstallLogsPVCName(testClusterDeployment()), Namespace: testNamespace}, pvc)
if assert.Error(t, err) {
assert.True(t, errors.IsNotFound(err))
}
},
},
{
name: "clusterdeployment must specify pull secret when there is no global pull secret ",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.PullSecretRef = nil
return cd
}(),
},
expectErr: true,
},
{
name: "Legacy dockercfg pull secret causes no errors once installed",
existing: []runtime.Object{
testInstalledClusterDeployment(time.Date(2019, 9, 6, 11, 58, 32, 45, time.UTC)),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
},
{
name: "No-op deleted cluster without finalizer",
existing: []runtime.Object{
testDeletedClusterDeploymentWithoutFinalizer(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
if deprovision != nil {
t.Errorf("got unexpected deprovision request")
}
},
},
{
name: "Skip deprovision for deleted BareMetal cluster",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Platform.AWS = nil
cd.Spec.Platform.BareMetal = &baremetal.Platform{}
cd.Labels[hivev1.HiveClusterPlatformLabel] = "baremetal"
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
assert.Nil(t, deprovision, "expected no deprovision request")
cd := getCD(c)
assert.Equal(t, 0, len(cd.Finalizers))
},
},
{
name: "Delete expired cluster deployment",
existing: []runtime.Object{
testExpiredClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if cd != nil {
t.Errorf("got unexpected cluster deployment (expected deleted)")
}
},
},
{
name: "Test PreserveOnDelete",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.Installed = true
cd.Spec.PreserveOnDelete = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Empty(t, cd.Finalizers, "expected empty finalizers")
}
deprovision := getDeprovision(c)
assert.Nil(t, deprovision, "expected no deprovision request")
},
},
{
name: "Test creation of uninstall job when PreserveOnDelete is true but cluster deployment is not installed",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.PreserveOnDelete = true
cd.Spec.Installed = false
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
require.NotNil(t, deprovision, "expected deprovision request")
assert.Equal(t, testClusterDeployment().Name, deprovision.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
},
},
{
name: "Create job to resolve installer image",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = nil
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
cd.Status.ClusterVersionStatus.AvailableUpdates = []openshiftapiv1.Update{}
return cd
}(),
testClusterImageSet(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
job := getImageSetJob(c)
				require.NotNil(t, job, "did not find expected imageset job")
// Ensure that the release image from the imageset is used in the job
envVars := job.Spec.Template.Spec.Containers[0].Env
for _, e := range envVars {
if e.Name == "RELEASE_IMAGE" {
if e.Value != testClusterImageSet().Spec.ReleaseImage {
t.Errorf("unexpected release image used in job: %s", e.Value)
}
break
}
}
// Ensure job type labels are set correctly
require.NotNil(t, job, "expected job")
assert.Equal(t, testClusterDeployment().Name, job.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.JobTypeImageSet, job.Labels[constants.JobTypeLabel], "incorrect job type label")
},
},
{
name: "Delete imageset job when complete",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = pointer.StringPtr("test-installer-image")
cd.Status.CLIImage = pointer.StringPtr("test-cli-image")
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
cd.Status.ClusterVersionStatus.AvailableUpdates = []openshiftapiv1.Update{}
return cd
}(),
testClusterImageSet(),
testCompletedImageSetJob(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
job := getImageSetJob(c)
assert.Nil(t, job, "expected imageset job to be deleted")
},
},
{
name: "Ensure release image from clusterdeployment (when present) is used to generate imageset job",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = nil
cd.Spec.Provisioning.ReleaseImage = "embedded-release-image:latest"
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
cd.Status.ClusterVersionStatus.AvailableUpdates = []openshiftapiv1.Update{}
return cd
}(),
testClusterImageSet(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
job := getImageSetJob(c)
				require.NotNil(t, job, "did not find expected imageset job")
envVars := job.Spec.Template.Spec.Containers[0].Env
for _, e := range envVars {
if e.Name == "RELEASE_IMAGE" {
if e.Value != "embedded-release-image:latest" {
t.Errorf("unexpected release image used in job: %s", e.Value)
}
break
}
}
},
},
{
name: "Ensure release image from clusterimageset is used as override image in install job",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = pointer.StringPtr("test-installer-image:latest")
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
return cd
}(),
func() *hivev1.ClusterImageSet {
cis := testClusterImageSet()
cis.Spec.ReleaseImage = "test-release-image:latest"
return cis
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
if assert.Len(t, provisions, 1, "expected provision to exist") {
env := provisions[0].Spec.PodSpec.Containers[0].Env
variable := corev1.EnvVar{}
found := false
for _, e := range env {
if e.Name == "OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE" {
variable = e
found = true
break
}
}
if !found {
t.Errorf("did not find expected override environment variable in job")
return
}
if variable.Value != "test-release-image:latest" {
t.Errorf("environment variable did not have the expected value. actual: %s", variable.Value)
}
}
},
},
{
name: "Create DNSZone when manageDNS is true",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
zone := getDNSZone(c)
require.NotNil(t, zone, "dns zone should exist")
assert.Equal(t, testClusterDeployment().Name, zone.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.DNSZoneTypeChild, zone.Labels[constants.DNSZoneTypeLabel], "incorrect dnszone type label")
},
},
{
name: "Wait when DNSZone is not available yet",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Empty(t, provisions, "provision should not exist")
},
},
{
name: "Set condition when DNSZone is not available yet",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assertConditionStatus(t, cd, hivev1.DNSNotReadyCondition, corev1.ConditionTrue)
},
},
{
name: "Clear condition when DNSZone is available",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
cd.Status.Conditions = append(cd.Status.Conditions, hivev1.ClusterDeploymentCondition{
Type: hivev1.DNSNotReadyCondition,
Status: corev1.ConditionTrue,
})
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testAvailableDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assertConditionStatus(t, cd, hivev1.DNSNotReadyCondition, corev1.ConditionFalse)
},
},
{
name: "Do not use unowned DNSZone",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
func() *hivev1.DNSZone {
zone := testDNSZone()
zone.OwnerReferences = []metav1.OwnerReference{}
return zone
}(),
},
expectErr: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.DNSNotReadyCondition)
if assert.NotNil(t, cond, "expected to find condition") {
assert.Equal(t, corev1.ConditionTrue, cond.Status, "unexpected condition status")
assert.Equal(t, "Existing DNS zone not owned by cluster deployment", cond.Message, "unexpected condition message")
}
}
zone := getDNSZone(c)
assert.NotNil(t, zone, "expected DNSZone to exist")
},
},
{
name: "Do not use DNSZone owned by other clusterdeployment",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
func() *hivev1.DNSZone {
zone := testDNSZone()
zone.OwnerReferences[0].UID = "other-uid"
return zone
}(),
},
expectErr: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.DNSNotReadyCondition)
if assert.NotNil(t, cond, "expected to find condition") {
assert.Equal(t, corev1.ConditionTrue, cond.Status, "unexpected condition status")
assert.Equal(t, "Existing DNS zone not owned by cluster deployment", cond.Message, "unexpected condition message")
}
}
zone := getDNSZone(c)
assert.NotNil(t, zone, "expected DNSZone to exist")
},
},
{
name: "Create provision when DNSZone is ready",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
cd.Annotations = map[string]string{dnsReadyAnnotation: "NOW"}
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testAvailableDNSZone(),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Len(t, provisions, 1, "expected provision to exist")
},
},
{
name: "Set DNS delay metric",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testAvailableDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assert.NotNil(t, cd.Annotations, "annotations should be set on clusterdeployment")
assert.Contains(t, cd.Annotations, dnsReadyAnnotation)
},
},
{
name: "Ensure managed DNSZone is deleted with cluster deployment",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
dnsZone := getDNSZone(c)
assert.Nil(t, dnsZone, "dnsZone should not exist")
},
},
{
name: "Delete cluster deployment with missing clusterimageset",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
assert.NotNil(t, deprovision, "expected deprovision request to be created")
},
},
{
name: "Delete old provisions",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeployment()
cd.Status.InstallRestarts = 4
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testFailedProvisionAttempt(0),
testFailedProvisionAttempt(1),
testFailedProvisionAttempt(2),
testFailedProvisionAttempt(3),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
actualAttempts := []int{}
for _, p := range getProvisions(c) {
actualAttempts = append(actualAttempts, p.Spec.Attempt)
}
expectedAttempts := []int{0, 2, 3, 4}
assert.ElementsMatch(t, expectedAttempts, actualAttempts, "unexpected provisions kept")
},
},
{
name: "Adopt provision",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testProvision(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing cluster deployment") {
if assert.NotNil(t, cd.Status.ProvisionRef, "provision reference not set") {
assert.Equal(t, provisionName, cd.Status.ProvisionRef.Name, "unexpected provision referenced")
}
}
},
},
{
name: "Do not adopt failed provision",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testFailedProvisionAttempt(0),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing cluster deployment") {
assert.Nil(t, cd.Status.ProvisionRef, "expected provision reference to not be set")
}
},
},
{
name: "Delete-after requeue",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.CreationTimestamp = metav1.Now()
cd.Annotations[deleteAfterAnnotation] = "8h"
return cd
}(),
testProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectedRequeueAfter: 8*time.Hour + 60*time.Second,
},
{
name: "Wait after failed provision",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.CreationTimestamp = metav1.Now()
cd.Annotations[deleteAfterAnnotation] = "8h"
return cd
}(),
testFailedProvisionTime(time.Now()),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectedRequeueAfter: 1 * time.Minute,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
if assert.NotNil(t, cd.Status.ProvisionRef, "missing provision ref") {
assert.Equal(t, provisionName, cd.Status.ProvisionRef.Name, "unexpected provision ref name")
}
}
},
},
{
name: "Clear out provision after wait time",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testFailedProvisionTime(time.Now().Add(-2 * time.Minute)),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Nil(t, cd.Status.ProvisionRef, "expected empty provision ref")
assert.Equal(t, 1, cd.Status.InstallRestarts, "expected incremented install restart count")
}
},
},
{
name: "Delete outstanding provision on delete",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}(),
testProvision(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectedRequeueAfter: defaultRequeueTime,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Empty(t, provisions, "expected provision to be deleted")
deprovision := getDeprovision(c)
assert.Nil(t, deprovision, "expect not to create deprovision request until provision removed")
},
},
{
name: "Remove finalizer after early-failure provision removed",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
now := metav1.Now()
cd.DeletionTimestamp = &now
cd.Spec.ClusterMetadata = nil
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Empty(t, cd.Finalizers, "expected empty finalizers")
}
},
},
{
name: "Create deprovision after late-failure provision removed",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Contains(t, cd.Finalizers, hivev1.FinalizerDeprovision, "expected hive finalizer")
}
deprovision := getDeprovision(c)
assert.NotNil(t, deprovision, "missing deprovision request")
},
},
{
name: "setSyncSetFailedCondition should be present",
existing: []runtime.Object{
testInstalledClusterDeployment(time.Now()),
createSyncSetInstanceObj(hivev1.ApplyFailureSyncCondition),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.SyncSetFailedCondition)
if assert.NotNil(t, cond, "missing SyncSetFailedCondition status condition") {
assert.Equal(t, corev1.ConditionTrue, cond.Status, "did not get expected state for SyncSetFailedCondition condition")
}
}
},
},
{
name: "setSyncSetFailedCondition value should be corev1.ConditionFalse",
existing: []runtime.Object{
func() runtime.Object {
cd := testInstalledClusterDeployment(time.Now())
cd.Status.Conditions = append(
cd.Status.Conditions,
hivev1.ClusterDeploymentCondition{
Type: hivev1.SyncSetFailedCondition,
Status: corev1.ConditionTrue,
},
)
return cd
}(),
createSyncSetInstanceObj(hivev1.ApplySuccessSyncCondition),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.SyncSetFailedCondition)
if assert.NotNil(t, cond, "missing SyncSetFailedCondition status condition") {
assert.Equal(t, corev1.ConditionFalse, cond.Status, "did not get expected state for SyncSetFailedCondition condition")
}
},
},
{
name: "Add cluster platform label",
existing: []runtime.Object{
testClusterDeploymentWithoutPlatformLabel(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Equal(t, getClusterPlatform(cd), cd.Labels[hivev1.HiveClusterPlatformLabel], "incorrect cluster platform label")
}
},
},
{
name: "Ensure cluster metadata set from provision",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.Spec.ClusterMetadata = nil
return cd
}(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
if assert.NotNil(t, cd.Spec.ClusterMetadata, "expected cluster metadata to be set") {
assert.Equal(t, testInfraID, cd.Spec.ClusterMetadata.InfraID, "unexpected infra ID")
assert.Equal(t, testClusterID, cd.Spec.ClusterMetadata.ClusterID, "unexpected cluster ID")
assert.Equal(t, adminKubeconfigSecret, cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name, "unexpected admin kubeconfig")
assert.Equal(t, adminPasswordSecret, cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name, "unexpected admin password")
}
}
},
},
{
name: "Ensure cluster metadata overwrites from provision",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{
InfraID: "old-infra-id",
ClusterID: "old-cluster-id",
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: "old-kubeconfig-secret"},
AdminPasswordSecretRef: corev1.LocalObjectReference{Name: "old-password-secret"},
}
return cd
}(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
if assert.NotNil(t, cd.Spec.ClusterMetadata, "expected cluster metadata to be set") {
assert.Equal(t, testInfraID, cd.Spec.ClusterMetadata.InfraID, "unexpected infra ID")
assert.Equal(t, testClusterID, cd.Spec.ClusterMetadata.ClusterID, "unexpected cluster ID")
assert.Equal(t, adminKubeconfigSecret, cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name, "unexpected admin kubeconfig")
assert.Equal(t, adminPasswordSecret, cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name, "unexpected admin password")
}
}
},
},
{
name: "set ClusterImageSet missing condition",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: "doesntexist"}
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectErr: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
require.Equal(t, 1, len(cd.Status.Conditions))
require.Equal(t, clusterImageSetNotFoundReason, cd.Status.Conditions[0].Reason)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logger := log.WithField("controller", "clusterDeployment")
fakeClient := fake.NewFakeClient(test.existing...)
controllerExpectations := controllerutils.NewExpectations(logger)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: logger,
expectations: controllerExpectations,
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
reconcileRequest := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: testName,
Namespace: testNamespace,
},
}
if test.pendingCreation {
controllerExpectations.ExpectCreations(reconcileRequest.String(), 1)
}
if test.expectConsoleRouteFetch {
mockRemoteClientBuilder.EXPECT().APIURL().Return("https://bar-api.clusters.example.com:6443", nil)
mockRemoteClientBuilder.EXPECT().Build().Return(testRemoteClusterAPIClient(), nil)
}
result, err := rcd.Reconcile(reconcileRequest)
if test.validate != nil {
test.validate(fakeClient, t)
}
if err != nil && !test.expectErr {
t.Errorf("Unexpected error: %v", err)
}
if err == nil && test.expectErr {
t.Errorf("Expected error but got none")
}
if test.expectedRequeueAfter == 0 {
assert.Zero(t, result.RequeueAfter, "expected empty requeue after")
} else {
assert.InDelta(t, test.expectedRequeueAfter, result.RequeueAfter, float64(10*time.Second), "unexpected requeue after")
}
actualPendingCreation := !controllerExpectations.SatisfiedExpectations(reconcileRequest.String())
assert.Equal(t, test.expectPendingCreation, actualPendingCreation, "unexpected pending creation")
})
}
}
func TestClusterDeploymentReconcileResults(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
tests := []struct {
name string
existing []runtime.Object
		expectedReconcileResult reconcile.Result
}{
{
name: "Requeue after adding finalizer",
existing: []runtime.Object{
testClusterDeploymentWithoutFinalizer(),
},
			expectedReconcileResult: reconcile.Result{},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logger := log.WithField("controller", "clusterDeployment")
fakeClient := fake.NewFakeClient(test.existing...)
controllerExpectations := controllerutils.NewExpectations(logger)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: logger,
expectations: controllerExpectations,
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
reconcileResult, err := rcd.Reconcile(reconcile.Request{
NamespacedName: types.NamespacedName{
Name: testName,
Namespace: testNamespace,
},
})
assert.NoError(t, err, "unexpected error")
			assert.Equal(t, test.expectedReconcileResult, reconcileResult, "unexpected reconcile result")
})
}
}
func TestCalculateNextProvisionTime(t *testing.T) {
cases := []struct {
name string
failureTime time.Time
attempt int
expectedNextTime time.Time
}{
{
name: "first attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 0,
expectedNextTime: time.Date(2019, time.July, 16, 0, 1, 0, 0, time.UTC),
},
{
name: "second attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 1,
expectedNextTime: time.Date(2019, time.July, 16, 0, 2, 0, 0, time.UTC),
},
{
name: "third attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 2,
expectedNextTime: time.Date(2019, time.July, 16, 0, 4, 0, 0, time.UTC),
},
{
name: "eleventh attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 10,
expectedNextTime: time.Date(2019, time.July, 16, 17, 4, 0, 0, time.UTC),
},
{
name: "twelfth attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 11,
expectedNextTime: time.Date(2019, time.July, 17, 0, 0, 0, 0, time.UTC),
},
{
name: "thirteenth attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 12,
expectedNextTime: time.Date(2019, time.July, 17, 0, 0, 0, 0, time.UTC),
},
{
name: "millionth attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 999999,
expectedNextTime: time.Date(2019, time.July, 17, 0, 0, 0, 0, time.UTC),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
actualNextTime := calculateNextProvisionTime(tc.failureTime, tc.attempt, log.WithField("controller", "clusterDeployment"))
assert.Equal(t, tc.expectedNextTime.String(), actualNextTime.String(), "unexpected next provision time")
})
}
}
func TestDeleteStaleProvisions(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
cases := []struct {
name string
existingAttempts []int
expectedAttempts []int
}{
{
name: "none",
},
{
name: "one",
existingAttempts: []int{0},
expectedAttempts: []int{0},
},
{
name: "three",
existingAttempts: []int{0, 1, 2},
expectedAttempts: []int{0, 1, 2},
},
{
name: "four",
existingAttempts: []int{0, 1, 2, 3},
expectedAttempts: []int{0, 2, 3},
},
{
name: "five",
existingAttempts: []int{0, 1, 2, 3, 4},
expectedAttempts: []int{0, 3, 4},
},
{
name: "five mixed order",
existingAttempts: []int{10, 3, 7, 8, 1},
expectedAttempts: []int{1, 8, 10},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
provisions := make([]runtime.Object, len(tc.existingAttempts))
for i, a := range tc.existingAttempts {
provisions[i] = testFailedProvisionAttempt(a)
}
fakeClient := fake.NewFakeClient(provisions...)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
}
rcd.deleteStaleProvisions(getProvisions(fakeClient), log.WithField("test", "TestDeleteStaleProvisions"))
actualAttempts := []int{}
for _, p := range getProvisions(fakeClient) {
actualAttempts = append(actualAttempts, p.Spec.Attempt)
}
assert.ElementsMatch(t, tc.expectedAttempts, actualAttempts, "unexpected provisions kept")
})
}
}
func testEmptyClusterDeployment() *hivev1.ClusterDeployment {
cd := &hivev1.ClusterDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Finalizers: []string{hivev1.FinalizerDeprovision},
UID: types.UID("1234"),
Annotations: map[string]string{},
Labels: map[string]string{},
},
}
return cd
}
func testClusterDeployment() *hivev1.ClusterDeployment {
cd := testEmptyClusterDeployment()
cd.Spec = hivev1.ClusterDeploymentSpec{
ClusterName: testClusterName,
PullSecretRef: &corev1.LocalObjectReference{
Name: pullSecretSecret,
},
Platform: hivev1.Platform{
AWS: &hivev1aws.Platform{
CredentialsSecretRef: corev1.LocalObjectReference{
Name: "aws-credentials",
},
Region: "us-east-1",
},
},
Provisioning: &hivev1.Provisioning{
InstallConfigSecretRef: corev1.LocalObjectReference{Name: "install-config-secret"},
},
ClusterMetadata: &hivev1.ClusterMetadata{
ClusterID: testClusterID,
InfraID: testInfraID,
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
AdminPasswordSecretRef: corev1.LocalObjectReference{Name: adminPasswordSecret},
},
}
cd.Labels[hivev1.HiveClusterPlatformLabel] = "aws"
cd.Status = hivev1.ClusterDeploymentStatus{
InstallerImage: pointer.StringPtr("installer-image:latest"),
CLIImage: pointer.StringPtr("cli:latest"),
}
return cd
}
func testInstalledClusterDeployment(installedAt time.Time) *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Installed = true
cd.Status.InstalledTimestamp = &metav1.Time{Time: installedAt}
cd.Status.APIURL = "http://quite.fake.com"
cd.Status.WebConsoleURL = "http://quite.fake.com/console"
return cd
}
func testClusterDeploymentWithoutFinalizer() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Finalizers = []string{}
return cd
}
func testClusterDeploymentWithoutPlatformLabel() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
delete(cd.Labels, hivev1.HiveClusterPlatformLabel)
return cd
}
func testDeletedClusterDeployment() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}
func testDeletedClusterDeploymentWithoutFinalizer() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
now := metav1.Now()
cd.DeletionTimestamp = &now
cd.Finalizers = []string{}
return cd
}
func testExpiredClusterDeployment() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.CreationTimestamp = metav1.Time{Time: metav1.Now().Add(-60 * time.Minute)}
cd.Annotations[deleteAfterAnnotation] = "5m"
return cd
}
func testClusterDeploymentWithProvision() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.ProvisionRef = &corev1.LocalObjectReference{Name: provisionName}
return cd
}
func testProvision() *hivev1.ClusterProvision {
cd := testClusterDeployment()
provision := &hivev1.ClusterProvision{
ObjectMeta: metav1.ObjectMeta{
Name: provisionName,
Namespace: testNamespace,
Labels: map[string]string{
constants.ClusterDeploymentNameLabel: testName,
},
},
Spec: hivev1.ClusterProvisionSpec{
ClusterDeploymentRef: corev1.LocalObjectReference{
Name: testName,
},
Stage: hivev1.ClusterProvisionStageInitializing,
},
}
controllerutil.SetControllerReference(cd, provision, scheme.Scheme)
return provision
}
func testSuccessfulProvision() *hivev1.ClusterProvision {
provision := testProvision()
provision.Spec.Stage = hivev1.ClusterProvisionStageComplete
provision.Spec.ClusterID = pointer.StringPtr(testClusterID)
provision.Spec.InfraID = pointer.StringPtr(testInfraID)
provision.Spec.AdminKubeconfigSecretRef = &corev1.LocalObjectReference{Name: adminKubeconfigSecret}
provision.Spec.AdminPasswordSecretRef = &corev1.LocalObjectReference{Name: adminPasswordSecret}
return provision
}
func testFailedProvisionAttempt(attempt int) *hivev1.ClusterProvision {
provision := testProvision()
provision.Name = fmt.Sprintf("%s-%02d", provision.Name, attempt)
provision.Spec.Attempt = attempt
provision.Spec.Stage = hivev1.ClusterProvisionStageFailed
return provision
}
func testFailedProvisionTime(failedAt time.Time) *hivev1.ClusterProvision {
provision := testProvision()
provision.Spec.Stage = hivev1.ClusterProvisionStageFailed
provision.Status.Conditions = []hivev1.ClusterProvisionCondition{
{
Type: hivev1.ClusterProvisionFailedCondition,
Status: corev1.ConditionTrue,
			LastTransitionTime: metav1.NewTime(failedAt),
},
}
return provision
}
func testInstallLogPVC() *corev1.PersistentVolumeClaim {
pvc := &corev1.PersistentVolumeClaim{}
pvc.Name = GetInstallLogsPVCName(testClusterDeployment())
pvc.Namespace = testNamespace
return pvc
}
func testMetadataConfigMap() *corev1.ConfigMap {
cm := &corev1.ConfigMap{}
cm.Name = metadataName
cm.Namespace = testNamespace
metadataJSON := `{
"aws": {
"identifier": [{"openshiftClusterID": "testFooClusterUUID"}]
}
}`
cm.Data = map[string]string{"metadata.json": metadataJSON}
return cm
}
func testSecret(secretType corev1.SecretType, name, key, value string) *corev1.Secret {
s := &corev1.Secret{
Type: secretType,
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: testNamespace,
},
Data: map[string][]byte{
key: []byte(value),
},
}
return s
}
func testRemoteClusterAPIClient() client.Client {
remoteClusterRouteObject := &routev1.Route{
ObjectMeta: metav1.ObjectMeta{
Name: remoteClusterRouteObjectName,
Namespace: remoteClusterRouteObjectNamespace,
},
}
remoteClusterRouteObject.Spec.Host = "bar-api.clusters.example.com:6443/console"
return fake.NewFakeClient(remoteClusterRouteObject)
}
func testClusterImageSet() *hivev1.ClusterImageSet {
cis := &hivev1.ClusterImageSet{}
cis.Name = testClusterImageSetName
cis.Spec.ReleaseImage = "test-release-image:latest"
return cis
}
func testDNSZone() *hivev1.DNSZone {
zone := &hivev1.DNSZone{}
zone.Name = testName + "-zone"
zone.Namespace = testNamespace
zone.OwnerReferences = append(
zone.OwnerReferences,
*metav1.NewControllerRef(
testClusterDeployment(),
schema.GroupVersionKind{
Group: "hive.openshift.io",
Version: "v1",
Kind: "clusterdeployment",
},
),
)
return zone
}
func testAvailableDNSZone() *hivev1.DNSZone {
zone := testDNSZone()
zone.Status.Conditions = []hivev1.DNSZoneCondition{
{
Type: hivev1.ZoneAvailableDNSZoneCondition,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Time{
Time: time.Now(),
},
},
}
return zone
}
func assertConditionStatus(t *testing.T, cd *hivev1.ClusterDeployment, condType hivev1.ClusterDeploymentConditionType, status corev1.ConditionStatus) {
found := false
for _, cond := range cd.Status.Conditions {
if cond.Type == condType {
found = true
assert.Equal(t, string(status), string(cond.Status), "condition found with unexpected status")
}
}
assert.True(t, found, "did not find expected condition type: %v", condType)
}
func getJob(c client.Client, name string) *batchv1.Job {
job := &batchv1.Job{}
err := c.Get(context.TODO(), client.ObjectKey{Name: name, Namespace: testNamespace}, job)
if err == nil {
return job
}
return nil
}
func TestUpdatePullSecretInfo(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
testPullSecret1 := `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`
tests := []struct {
name string
existingCD []runtime.Object
validate func(*testing.T, *corev1.Secret)
}{
{
name: "update existing merged pull secret with the new pull secret",
existingCD: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef = corev1.LocalObjectReference{Name: adminKubeconfigSecret}
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockercfg, pullSecretSecret, corev1.DockerConfigJsonKey, testPullSecret1),
testSecret(corev1.SecretTypeDockercfg, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(t *testing.T, pullSecretObj *corev1.Secret) {
pullSecret, ok := pullSecretObj.Data[corev1.DockerConfigJsonKey]
if !ok {
t.Error("Error getting pull secret")
}
assert.Equal(t, string(pullSecret), testPullSecret1)
},
},
{
name: "Add a new merged pull secret",
existingCD: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef = corev1.LocalObjectReference{Name: adminKubeconfigSecret}
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockercfg, pullSecretSecret, corev1.DockerConfigJsonKey, testPullSecret1),
},
validate: func(t *testing.T, pullSecretObj *corev1.Secret) {
assert.Equal(t, testClusterDeployment().Name, pullSecretObj.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.SecretTypeMergedPullSecret, pullSecretObj.Labels[constants.SecretTypeLabel], "incorrect secret type label")
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClient := fake.NewFakeClient(test.existingCD...)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: log.WithField("controller", "clusterDeployment"),
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
_, err := rcd.Reconcile(reconcile.Request{
NamespacedName: types.NamespacedName{
Name: testName,
Namespace: testNamespace,
},
})
assert.NoError(t, err, "unexpected error")
cd := getCDFromClient(rcd.Client)
mergedSecretName := constants.GetMergedPullSecretName(cd)
existingPullSecretObj := &corev1.Secret{}
err = rcd.Get(context.TODO(), types.NamespacedName{Name: mergedSecretName, Namespace: cd.Namespace}, existingPullSecretObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if test.validate != nil {
test.validate(t, existingPullSecretObj)
}
})
}
}
func getCDWithoutPullSecret() *hivev1.ClusterDeployment {
cd := testEmptyClusterDeployment()
cd.Spec = hivev1.ClusterDeploymentSpec{
ClusterName: testClusterName,
Platform: hivev1.Platform{
AWS: &hivev1aws.Platform{
CredentialsSecretRef: corev1.LocalObjectReference{
Name: "aws-credentials",
},
Region: "us-east-1",
},
},
ClusterMetadata: &hivev1.ClusterMetadata{
ClusterID: testClusterID,
InfraID: testInfraID,
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
},
}
cd.Status = hivev1.ClusterDeploymentStatus{
InstallerImage: pointer.StringPtr("installer-image:latest"),
}
return cd
}
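// getCDFromClient looks up the test ClusterDeployment from the fake client, or
// returns nil if it cannot be retrieved.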
func getCDFromClient(c client.Client) *hivev1.ClusterDeployment {
cd := &hivev1.ClusterDeployment{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName, Namespace: testNamespace}, cd)
if err == nil {
return cd
}
return nil
}
func createGlobalPullSecretObj(secretType corev1.SecretType, name, key, value string) *corev1.Secret {
secret := &corev1.Secret{
Type: secretType,
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.HiveNamespace,
},
Data: map[string][]byte{
key: []byte(value),
},
}
return secret
}
func TestMergePullSecrets(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
tests := []struct {
name string
localPullSecret string
globalPullSecret string
mergedPullSecret string
existingObjs []runtime.Object
expectedErr bool
addGlobalSecretToHiveNs bool
}{
{
			name:             "merged pull secret should be equal to local secret",
localPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
mergedPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := getCDWithoutPullSecret()
cd.Spec.PullSecretRef = &corev1.LocalObjectReference{
Name: pullSecretSecret,
}
return cd
}(),
},
},
{
			name:             "merged pull secret should be equal to global pull secret",
globalPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
mergedPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
getCDWithoutPullSecret(),
},
addGlobalSecretToHiveNs: true,
},
{
name: "Both local secret and global pull secret available",
localPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
globalPullSecret: `{"auths":{"cloud.okd.com":{"auth":"b34xVjWERckjfUyV1pMQTc=","email":"[email protected]"}}}`,
mergedPullSecret: `{"auths":{"cloud.okd.com":{"auth":"b34xVjWERckjfUyV1pMQTc=","email":"[email protected]"},"registry.svc.ci.okd.org":{"auth":"dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := getCDWithoutPullSecret()
cd.Spec.PullSecretRef = &corev1.LocalObjectReference{
Name: pullSecretSecret,
}
return cd
}(),
},
addGlobalSecretToHiveNs: true,
},
{
name: "global pull secret does not exist in Hive namespace",
globalPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
getCDWithoutPullSecret(),
},
addGlobalSecretToHiveNs: false,
expectedErr: true,
},
{
			name: "Test should fail as local and global pull secrets are not available",
existingObjs: []runtime.Object{
getCDWithoutPullSecret(),
},
expectedErr: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.globalPullSecret != "" && test.addGlobalSecretToHiveNs == true {
globalPullSecretObj := createGlobalPullSecretObj(corev1.SecretTypeDockerConfigJson, globalPullSecret, corev1.DockerConfigJsonKey, test.globalPullSecret)
test.existingObjs = append(test.existingObjs, globalPullSecretObj)
}
if test.localPullSecret != "" {
localSecretObject := testSecret(corev1.SecretTypeDockercfg, pullSecretSecret, corev1.DockerConfigJsonKey, test.localPullSecret)
test.existingObjs = append(test.existingObjs, localSecretObject)
}
fakeClient := fake.NewFakeClient(test.existingObjs...)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: log.WithField("controller", "clusterDeployment"),
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
cd := getCDFromClient(rcd.Client)
if test.globalPullSecret != "" {
os.Setenv(constants.GlobalPullSecret, globalPullSecret)
}
defer os.Unsetenv(constants.GlobalPullSecret)
			mergedPullSecret, err := rcd.mergePullSecrets(cd, rcd.logger)
if test.expectedErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
if test.mergedPullSecret != "" {
				assert.Equal(t, test.mergedPullSecret, mergedPullSecret)
}
})
}
}
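// getProvisions lists all ClusterProvisions known to the fake client and returns
// them as a slice of pointers (nil if the list call fails).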
func getProvisions(c client.Client) []*hivev1.ClusterProvision {
provisionList := &hivev1.ClusterProvisionList{}
if err := c.List(context.TODO(), provisionList); err != nil {
return nil
}
provisions := make([]*hivev1.ClusterProvision, len(provisionList.Items))
for i := range provisionList.Items {
provisions[i] = &provisionList.Items[i]
}
return provisions
}
func createSyncSetInstanceObj(syncCondType hivev1.SyncConditionType) *hivev1.SyncSetInstance {
ssi := &hivev1.SyncSetInstance{
ObjectMeta: metav1.ObjectMeta{
Name: testSyncsetInstanceName,
Namespace: testNamespace,
},
}
ssi.Spec.ClusterDeploymentRef.Name = testName
ssi.Status = createSyncSetInstanceStatus(syncCondType)
return ssi
}
func createSyncSetInstanceStatus(syncCondType hivev1.SyncConditionType) hivev1.SyncSetInstanceStatus {
conditionTime := metav1.NewTime(time.Now())
var ssiStatus corev1.ConditionStatus
var condType hivev1.SyncConditionType
if syncCondType == hivev1.ApplyFailureSyncCondition {
ssiStatus = corev1.ConditionTrue
condType = syncCondType
} else {
ssiStatus = corev1.ConditionFalse
condType = syncCondType
}
status := hivev1.SyncSetInstanceStatus{
Conditions: []hivev1.SyncCondition{
{
Type: condType,
Status: ssiStatus,
LastTransitionTime: conditionTime,
LastProbeTime: conditionTime,
},
},
}
return status
}
func testCompletedImageSetJob() *batchv1.Job {
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: imageSetJobName,
Namespace: testNamespace,
},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{{
Type: batchv1.JobComplete,
Status: corev1.ConditionTrue,
}},
},
}
}
| 1 | 10,974 | This expected value should probably be what you literally expect, otherwise there's a chance getClusterRegion is doing something wrong and the test wouldn't catch it because it's being run for both expected and actual. | openshift-hive | go |
@@ -27,6 +27,7 @@ package encryption
import (
"crypto/tls"
"crypto/x509"
+ "encoding/base64"
"errors"
"fmt"
"io/ioutil" | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package encryption
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"sync"
"go.temporal.io/server/common/service/config"
)
var _ CertProvider = (*localStoreCertProvider)(nil)
type localStoreCertProvider struct {
sync.RWMutex
tlsSettings *config.GroupTLS
serverCert *tls.Certificate
clientCAs *x509.CertPool
serverCAs *x509.CertPool
}
func (s *localStoreCertProvider) GetSettings() *config.GroupTLS {
return s.tlsSettings
}
func (s *localStoreCertProvider) FetchServerCertificate() (*tls.Certificate, error) {
if s.tlsSettings.Server.CertFile == "" {
return nil, nil
}
// Check under a read lock first
s.RLock()
if s.serverCert != nil {
defer s.RUnlock()
return s.serverCert, nil
}
// Not found, manually unlock read lock and move to write lock
s.RUnlock()
s.Lock()
defer s.Unlock()
// Get serverCert from disk
serverCert, err := tls.LoadX509KeyPair(s.tlsSettings.Server.CertFile, s.tlsSettings.Server.KeyFile)
if err != nil {
return nil, fmt.Errorf("loading server tls certificate failed: %v", err)
}
s.serverCert = &serverCert
return s.serverCert, nil
}
func (s *localStoreCertProvider) FetchClientCAs() (*x509.CertPool, error) {
if s.tlsSettings.Server.ClientCAFiles == nil {
return nil, nil
}
// Check under a read lock first
s.RLock()
if s.clientCAs != nil {
defer s.RUnlock()
return s.clientCAs, nil
}
// Not found, manually unlock read lock and move to write lock
s.RUnlock()
s.Lock()
defer s.Unlock()
var clientCaPool *x509.CertPool
if len(s.tlsSettings.Server.ClientCAFiles) > 0 {
var err error
clientCaPool, err = buildCAPool(s.tlsSettings.Server.ClientCAFiles)
if err != nil {
return nil, err
}
}
s.clientCAs = clientCaPool
return s.clientCAs, nil
}
func (s *localStoreCertProvider) FetchServerRootCAsForClient() (*x509.CertPool, error) {
if s.tlsSettings.Client.RootCAFiles == nil {
return nil, nil
}
// Check under a read lock first
s.RLock()
if s.serverCAs != nil {
defer s.RUnlock()
		return s.serverCAs, nil
}
// Not found, manually unlock read lock and move to write lock
s.RUnlock()
s.Lock()
defer s.Unlock()
var serverCAPool *x509.CertPool
if len(s.tlsSettings.Client.RootCAFiles) > 0 {
var err error
serverCAPool, err = buildCAPool(s.tlsSettings.Client.RootCAFiles)
if err != nil {
return nil, err
}
}
s.serverCAs = serverCAPool
return s.serverCAs, nil
}
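// buildCAPool reads each of the given CA files and appends it to a new cert pool,
// returning an error if any file cannot be read or parsed as PEM.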
func buildCAPool(caFiles []string) (*x509.CertPool, error) {
caPool := x509.NewCertPool()
for _, ca := range caFiles {
caBytes, err := ioutil.ReadFile(ca)
if err != nil {
			return nil, fmt.Errorf("failed reading CA cert: %v", err)
}
if !caPool.AppendCertsFromPEM(caBytes) {
return nil, errors.New("unknown failure constructing cert pool for ca")
}
}
return caPool, nil
}
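// The three Fetch* methods above share the same caching shape: check under a read
// lock, then upgrade to a write lock and load from disk. Below is a minimal sketch
// of that pattern (illustrative only, not part of the original provider) with an
// added re-check after taking the write lock, so a value loaded by a concurrent
// caller is reused instead of being loaded again.
func exampleCachedFetch(mu *sync.RWMutex, cached **x509.CertPool, load func() (*x509.CertPool, error)) (*x509.CertPool, error) {
	mu.RLock()
	if *cached != nil {
		defer mu.RUnlock()
		return *cached, nil
	}
	mu.RUnlock()
	mu.Lock()
	defer mu.Unlock()
	// Re-check: another goroutine may have populated the cache while we waited.
	if *cached != nil {
		return *cached, nil
	}
	pool, err := load()
	if err != nil {
		return nil, err
	}
	*cached = pool
	return pool, nil
}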
| 1 | 10,488 | TODO: update unit tests to exercise base64-inline path | temporalio-temporal | go |
@@ -551,6 +551,16 @@ public class UserPreferences {
restartUpdateAlarm(false);
}
+ public static boolean shouldShowOnboarding(String location) {
+ String key = "onboarding_" + location;
+ if (prefs.getBoolean(key, true)) {
+ prefs.edit().putBoolean(key, false).apply();
+ return true;
+ } else {
+ return false;
+ }
+ }
+
/**
* Change the auto-flattr settings
* | 1 | package de.danoeh.antennapod.core.preferences;
import android.app.AlarmManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.SystemClock;
import android.preference.PreferenceManager;
import android.support.annotation.IntRange;
import android.support.annotation.NonNull;
import android.support.v4.app.NotificationCompat;
import android.text.TextUtils;
import android.util.Log;
import org.json.JSONArray;
import org.json.JSONException;
import java.io.File;
import java.io.IOException;
import java.net.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.List;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.receiver.FeedUpdateReceiver;
import de.danoeh.antennapod.core.service.download.ProxyConfig;
import de.danoeh.antennapod.core.storage.APCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APNullCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APQueueCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.EpisodeCleanupAlgorithm;
import de.danoeh.antennapod.core.util.Converter;
/**
* Provides access to preferences set by the user in the settings screen. A
* private instance of this class must first be instantiated via
* init() or otherwise every public method will throw an Exception
* when called.
*/
public class UserPreferences {
private static final String IMPORT_DIR = "import/";
private static final String TAG = "UserPreferences";
// User Interface
public static final String PREF_THEME = "prefTheme";
public static final String PREF_HIDDEN_DRAWER_ITEMS = "prefHiddenDrawerItems";
private static final String PREF_DRAWER_FEED_ORDER = "prefDrawerFeedOrder";
private static final String PREF_DRAWER_FEED_COUNTER = "prefDrawerFeedIndicator";
private static final String PREF_EXPANDED_NOTIFICATION = "prefExpandNotify";
private static final String PREF_PERSISTENT_NOTIFICATION = "prefPersistNotify";
public static final String PREF_COMPACT_NOTIFICATION_BUTTONS = "prefCompactNotificationButtons";
public static final String PREF_LOCKSCREEN_BACKGROUND = "prefLockscreenBackground";
private static final String PREF_SHOW_DOWNLOAD_REPORT = "prefShowDownloadReport";
// Queue
private static final String PREF_QUEUE_ADD_TO_FRONT = "prefQueueAddToFront";
// Playback
public static final String PREF_PAUSE_ON_HEADSET_DISCONNECT = "prefPauseOnHeadsetDisconnect";
public static final String PREF_UNPAUSE_ON_HEADSET_RECONNECT = "prefUnpauseOnHeadsetReconnect";
private static final String PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT = "prefUnpauseOnBluetoothReconnect";
    private static final String PREF_HARDWARE_FORWARD_BUTTON_SKIPS = "prefHardwareForwardButtonSkips";
private static final String PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS = "prefHardwarePreviousButtonRestarts";
public static final String PREF_FOLLOW_QUEUE = "prefFollowQueue";
private static final String PREF_SKIP_KEEPS_EPISODE = "prefSkipKeepsEpisode";
private static final String PREF_FAVORITE_KEEPS_EPISODE = "prefFavoriteKeepsEpisode";
private static final String PREF_AUTO_DELETE = "prefAutoDelete";
public static final String PREF_SMART_MARK_AS_PLAYED_SECS = "prefSmartMarkAsPlayedSecs";
private static final String PREF_PLAYBACK_SPEED_ARRAY = "prefPlaybackSpeedArray";
private static final String PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS = "prefPauseForFocusLoss";
private static final String PREF_RESUME_AFTER_CALL = "prefResumeAfterCall";
public static final String PREF_VIDEO_BEHAVIOR = "prefVideoBehavior";
// Network
private static final String PREF_ENQUEUE_DOWNLOADED = "prefEnqueueDownloaded";
public static final String PREF_UPDATE_INTERVAL = "prefAutoUpdateIntervall";
private static final String PREF_MOBILE_UPDATE = "prefMobileUpdate";
public static final String PREF_EPISODE_CLEANUP = "prefEpisodeCleanup";
public static final String PREF_PARALLEL_DOWNLOADS = "prefParallelDownloads";
public static final String PREF_EPISODE_CACHE_SIZE = "prefEpisodeCacheSize";
public static final String PREF_ENABLE_AUTODL = "prefEnableAutoDl";
public static final String PREF_ENABLE_AUTODL_ON_BATTERY = "prefEnableAutoDownloadOnBattery";
public static final String PREF_ENABLE_AUTODL_WIFI_FILTER = "prefEnableAutoDownloadWifiFilter";
public static final String PREF_ENABLE_AUTODL_ON_MOBILE = "prefEnableAutoDownloadOnMobile";
private static final String PREF_AUTODL_SELECTED_NETWORKS = "prefAutodownloadSelectedNetworks";
private static final String PREF_PROXY_TYPE = "prefProxyType";
private static final String PREF_PROXY_HOST = "prefProxyHost";
private static final String PREF_PROXY_PORT = "prefProxyPort";
private static final String PREF_PROXY_USER = "prefProxyUser";
private static final String PREF_PROXY_PASSWORD = "prefProxyPassword";
// Services
private static final String PREF_AUTO_FLATTR = "pref_auto_flattr";
private static final String PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD = "prefAutoFlattrPlayedDurationThreshold";
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications";
// Other
private static final String PREF_DATA_FOLDER = "prefDataFolder";
public static final String PREF_IMAGE_CACHE_SIZE = "prefImageCacheSize";
// Mediaplayer
private static final String PREF_PLAYBACK_SPEED = "prefPlaybackSpeed";
private static final String PREF_FAST_FORWARD_SECS = "prefFastForwardSecs";
private static final String PREF_REWIND_SECS = "prefRewindSecs";
private static final String PREF_QUEUE_LOCKED = "prefQueueLocked";
private static final String IMAGE_CACHE_DEFAULT_VALUE = "100";
private static final int IMAGE_CACHE_SIZE_MINIMUM = 20;
private static final String PREF_LEFT_VOLUME = "prefLeftVolume";
private static final String PREF_RIGHT_VOLUME = "prefRightVolume";
// Experimental
public static final String PREF_SONIC = "prefSonic";
private static final String PREF_STEREO_TO_MONO = "PrefStereoToMono";
public static final String PREF_NORMALIZER = "prefNormalizer";
public static final String PREF_CAST_ENABLED = "prefCast"; //Used for enabling Chromecast support
public static final int EPISODE_CLEANUP_QUEUE = -1;
public static final int EPISODE_CLEANUP_NULL = -2;
public static final int EPISODE_CLEANUP_DEFAULT = 0;
// Constants
private static final int NOTIFICATION_BUTTON_REWIND = 0;
private static final int NOTIFICATION_BUTTON_FAST_FORWARD = 1;
private static final int NOTIFICATION_BUTTON_SKIP = 2;
private static final int EPISODE_CACHE_SIZE_UNLIMITED = -1;
public static final int FEED_ORDER_COUNTER = 0;
public static final int FEED_ORDER_ALPHABETICAL = 1;
public static final int FEED_ORDER_LAST_UPDATE = 2;
public static final int FEED_ORDER_MOST_PLAYED = 3;
public static final int FEED_COUNTER_SHOW_NEW_UNPLAYED_SUM = 0;
public static final int FEED_COUNTER_SHOW_NEW = 1;
public static final int FEED_COUNTER_SHOW_UNPLAYED = 2;
public static final int FEED_COUNTER_SHOW_NONE = 3;
public static final int FEED_COUNTER_SHOW_DOWNLOADED = 4;
private static Context context;
private static SharedPreferences prefs;
/**
* Sets up the UserPreferences class.
*
* @throws IllegalArgumentException if context is null
*/
public static void init(@NonNull Context context) {
Log.d(TAG, "Creating new instance of UserPreferences");
UserPreferences.context = context.getApplicationContext();
UserPreferences.prefs = PreferenceManager.getDefaultSharedPreferences(context);
createImportDirectory();
createNoMediaFile();
}
/**
* Returns theme as R.style value
*
* @return R.style.Theme_AntennaPod_Light or R.style.Theme_AntennaPod_Dark
*/
public static int getTheme() {
return readThemeValue(prefs.getString(PREF_THEME, "0"));
}
public static int getNoTitleTheme() {
int theme = getTheme();
if (theme == R.style.Theme_AntennaPod_Dark) {
return R.style.Theme_AntennaPod_Dark_NoTitle;
} else {
return R.style.Theme_AntennaPod_Light_NoTitle;
}
}
public static List<String> getHiddenDrawerItems() {
String hiddenItems = prefs.getString(PREF_HIDDEN_DRAWER_ITEMS, "");
return new ArrayList<>(Arrays.asList(TextUtils.split(hiddenItems, ",")));
}
public static List<Integer> getCompactNotificationButtons() {
String[] buttons = TextUtils.split(
prefs.getString(PREF_COMPACT_NOTIFICATION_BUTTONS,
String.valueOf(NOTIFICATION_BUTTON_SKIP)),
",");
List<Integer> notificationButtons = new ArrayList<>();
for (String button : buttons) {
notificationButtons.add(Integer.parseInt(button));
}
return notificationButtons;
}
/**
* Helper function to return whether the specified button should be shown on compact
* notifications.
*
* @param buttonId Either NOTIFICATION_BUTTON_REWIND, NOTIFICATION_BUTTON_FAST_FORWARD or
* NOTIFICATION_BUTTON_SKIP.
* @return {@code true} if button should be shown, {@code false} otherwise
*/
private static boolean showButtonOnCompactNotification(int buttonId) {
return getCompactNotificationButtons().contains(buttonId);
}
public static boolean showRewindOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_REWIND);
}
public static boolean showFastForwardOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_FAST_FORWARD);
}
public static boolean showSkipOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_SKIP);
}
public static int getFeedOrder() {
String value = prefs.getString(PREF_DRAWER_FEED_ORDER, "0");
return Integer.parseInt(value);
}
public static int getFeedCounterSetting() {
String value = prefs.getString(PREF_DRAWER_FEED_COUNTER, "0");
return Integer.parseInt(value);
}
/**
* Returns notification priority.
*
* @return NotificationCompat.PRIORITY_MAX or NotificationCompat.PRIORITY_DEFAULT
*/
public static int getNotifyPriority() {
if (prefs.getBoolean(PREF_EXPANDED_NOTIFICATION, false)) {
return NotificationCompat.PRIORITY_MAX;
} else {
return NotificationCompat.PRIORITY_DEFAULT;
}
}
/**
* Returns true if notifications are persistent
*
* @return {@code true} if notifications are persistent, {@code false} otherwise
*/
public static boolean isPersistNotify() {
return prefs.getBoolean(PREF_PERSISTENT_NOTIFICATION, true);
}
/**
* Returns true if the lockscreen background should be set to the current episode's image
*
* @return {@code true} if the lockscreen background should be set, {@code false} otherwise
*/
public static boolean setLockscreenBackground() {
return prefs.getBoolean(PREF_LOCKSCREEN_BACKGROUND, true);
}
/**
* Returns true if download reports are shown
*
* @return {@code true} if download reports are shown, {@code false} otherwise
*/
public static boolean showDownloadReport() {
return prefs.getBoolean(PREF_SHOW_DOWNLOAD_REPORT, true);
}
public static boolean enqueueDownloadedEpisodes() {
return prefs.getBoolean(PREF_ENQUEUE_DOWNLOADED, true);
}
public static boolean enqueueAtFront() {
return prefs.getBoolean(PREF_QUEUE_ADD_TO_FRONT, false);
}
public static boolean isPauseOnHeadsetDisconnect() {
return prefs.getBoolean(PREF_PAUSE_ON_HEADSET_DISCONNECT, true);
}
public static boolean isUnpauseOnHeadsetReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_HEADSET_RECONNECT, true);
}
public static boolean isUnpauseOnBluetoothReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT, false);
}
public static boolean shouldHardwareButtonSkip() {
        return prefs.getBoolean(PREF_HARDWARE_FORWARD_BUTTON_SKIPS, false);
}
public static boolean shouldHardwarePreviousButtonRestart() {
return prefs.getBoolean(PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS, false);
}
public static boolean isFollowQueue() {
return prefs.getBoolean(PREF_FOLLOW_QUEUE, true);
}
public static boolean shouldSkipKeepEpisode() { return prefs.getBoolean(PREF_SKIP_KEEPS_EPISODE, true); }
public static boolean shouldFavoriteKeepEpisode() {
return prefs.getBoolean(PREF_FAVORITE_KEEPS_EPISODE, true);
}
public static boolean isAutoDelete() {
return prefs.getBoolean(PREF_AUTO_DELETE, false);
}
public static int getSmartMarkAsPlayedSecs() {
return Integer.parseInt(prefs.getString(PREF_SMART_MARK_AS_PLAYED_SECS, "30"));
}
public static boolean isAutoFlattr() {
return prefs.getBoolean(PREF_AUTO_FLATTR, false);
}
public static String getPlaybackSpeed() {
return prefs.getString(PREF_PLAYBACK_SPEED, "1.00");
}
public static String[] getPlaybackSpeedArray() {
return readPlaybackSpeedArray(prefs.getString(PREF_PLAYBACK_SPEED_ARRAY, null));
}
public static float getLeftVolume() {
int volume = prefs.getInt(PREF_LEFT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static float getRightVolume() {
int volume = prefs.getInt(PREF_RIGHT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static int getLeftVolumePercentage() {
return prefs.getInt(PREF_LEFT_VOLUME, 100);
}
public static int getRightVolumePercentage() {
return prefs.getInt(PREF_RIGHT_VOLUME, 100);
}
public static boolean shouldPauseForFocusLoss() {
return prefs.getBoolean(PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS, false);
}
/*
* Returns update interval in milliseconds; value 0 means that auto update is disabled
* or feeds are updated at a certain time of day
*/
public static long getUpdateInterval() {
String updateInterval = prefs.getString(PREF_UPDATE_INTERVAL, "0");
if(!updateInterval.contains(":")) {
return readUpdateInterval(updateInterval);
} else {
return 0;
}
}
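    /**
     * Returns the configured hour and minute of the time-of-day update as a
     * two-element array, or an empty array if updates are interval based.
     */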
public static int[] getUpdateTimeOfDay() {
String datetime = prefs.getString(PREF_UPDATE_INTERVAL, "");
if(datetime.length() >= 3 && datetime.contains(":")) {
String[] parts = datetime.split(":");
int hourOfDay = Integer.parseInt(parts[0]);
int minute = Integer.parseInt(parts[1]);
return new int[] { hourOfDay, minute };
} else {
return new int[0];
}
}
public static boolean isAllowMobileUpdate() {
return prefs.getBoolean(PREF_MOBILE_UPDATE, false);
}
public static int getParallelDownloads() {
return Integer.parseInt(prefs.getString(PREF_PARALLEL_DOWNLOADS, "4"));
}
public static int getEpisodeCacheSizeUnlimited() {
return context.getResources().getInteger(R.integer.episode_cache_size_unlimited);
}
/**
* Returns the capacity of the episode cache. This method will return the
* negative integer EPISODE_CACHE_SIZE_UNLIMITED if the cache size is set to
* 'unlimited'.
*/
public static int getEpisodeCacheSize() {
return readEpisodeCacheSizeInternal(prefs.getString(PREF_EPISODE_CACHE_SIZE, "20"));
}
public static boolean isEnableAutodownload() {
return prefs.getBoolean(PREF_ENABLE_AUTODL, false);
}
public static boolean isEnableAutodownloadOnBattery() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_BATTERY, true);
}
public static boolean isEnableAutodownloadWifiFilter() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_WIFI_FILTER, false);
}
public static boolean isEnableAutodownloadOnMobile() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_MOBILE, false);
}
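    /**
     * Returns the image cache size in bytes, resetting the preference to the
     * default if the stored value is below the allowed minimum.
     */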
public static int getImageCacheSize() {
String cacheSizeString = prefs.getString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE);
int cacheSizeInt = Integer.parseInt(cacheSizeString);
// if the cache size is too small the user won't get any images at all
// that's bad, force it back to the default.
if (cacheSizeInt < IMAGE_CACHE_SIZE_MINIMUM) {
prefs.edit().putString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE).apply();
cacheSizeInt = Integer.parseInt(IMAGE_CACHE_DEFAULT_VALUE);
}
        // convert the preference value (in MB) to bytes
        int cacheSizeBytes = cacheSizeInt * 1024 * 1024;
        return cacheSizeBytes;
}
public static int getFastForwardSecs() {
return prefs.getInt(PREF_FAST_FORWARD_SECS, 30);
}
public static int getRewindSecs() {
return prefs.getInt(PREF_REWIND_SECS, 30);
}
/**
* Returns the time after which an episode should be auto-flattr'd in percent of the episode's
* duration.
*/
public static float getAutoFlattrPlayedDurationThreshold() {
return prefs.getFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, 0.8f);
}
public static String[] getAutodownloadSelectedNetworks() {
String selectedNetWorks = prefs.getString(PREF_AUTODL_SELECTED_NETWORKS, "");
return TextUtils.split(selectedNetWorks, ",");
}
public static void setProxyConfig(ProxyConfig config) {
SharedPreferences.Editor editor = prefs.edit();
editor.putString(PREF_PROXY_TYPE, config.type.name());
if(TextUtils.isEmpty(config.host)) {
editor.remove(PREF_PROXY_HOST);
} else {
editor.putString(PREF_PROXY_HOST, config.host);
}
if(config.port <= 0 || config.port > 65535) {
editor.remove(PREF_PROXY_PORT);
} else {
editor.putInt(PREF_PROXY_PORT, config.port);
}
if(TextUtils.isEmpty(config.username)) {
editor.remove(PREF_PROXY_USER);
} else {
editor.putString(PREF_PROXY_USER, config.username);
}
if(TextUtils.isEmpty(config.password)) {
editor.remove(PREF_PROXY_PASSWORD);
} else {
editor.putString(PREF_PROXY_PASSWORD, config.password);
}
editor.apply();
}
public static ProxyConfig getProxyConfig() {
Proxy.Type type = Proxy.Type.valueOf(prefs.getString(PREF_PROXY_TYPE, Proxy.Type.DIRECT.name()));
String host = prefs.getString(PREF_PROXY_HOST, null);
int port = prefs.getInt(PREF_PROXY_PORT, 0);
String username = prefs.getString(PREF_PROXY_USER, null);
String password = prefs.getString(PREF_PROXY_PASSWORD, null);
return new ProxyConfig(type, host, port, username, password);
}
public static boolean shouldResumeAfterCall() {
return prefs.getBoolean(PREF_RESUME_AFTER_CALL, true);
}
public static boolean isQueueLocked() {
return prefs.getBoolean(PREF_QUEUE_LOCKED, false);
}
public static void setFastForwardSecs(int secs) {
prefs.edit()
.putInt(PREF_FAST_FORWARD_SECS, secs)
.apply();
}
public static void setRewindSecs(int secs) {
prefs.edit()
.putInt(PREF_REWIND_SECS, secs)
.apply();
}
public static void setPlaybackSpeed(String speed) {
prefs.edit()
.putString(PREF_PLAYBACK_SPEED, speed)
.apply();
}
public static void setPlaybackSpeedArray(String[] speeds) {
JSONArray jsonArray = new JSONArray();
for (String speed : speeds) {
jsonArray.put(speed);
}
prefs.edit()
.putString(PREF_PLAYBACK_SPEED_ARRAY, jsonArray.toString())
.apply();
}
public static void setVolume(@IntRange(from = 0, to = 100) int leftVolume,
@IntRange(from = 0, to = 100) int rightVolume) {
prefs.edit()
.putInt(PREF_LEFT_VOLUME, leftVolume)
.putInt(PREF_RIGHT_VOLUME, rightVolume)
.apply();
}
public static void setAutodownloadSelectedNetworks(String[] value) {
prefs.edit()
.putString(PREF_AUTODL_SELECTED_NETWORKS, TextUtils.join(",", value))
.apply();
}
/**
* Sets the update interval value.
*/
public static void setUpdateInterval(long hours) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, String.valueOf(hours))
.apply();
// when updating with an interval, we assume the user wants
// to update *now* and then every 'hours' interval thereafter.
restartUpdateAlarm(true);
}
/**
     * Sets the time of day at which feeds are refreshed automatically.
*/
public static void setUpdateTimeOfDay(int hourOfDay, int minute) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, hourOfDay + ":" + minute)
.apply();
restartUpdateAlarm(false);
}
/**
* Change the auto-flattr settings
*
* @param enabled Whether automatic flattring should be enabled at all
* @param autoFlattrThreshold The percentage of playback time after which an episode should be
* flattrd. Must be a value between 0 and 1 (inclusive)
* */
public static void setAutoFlattrSettings( boolean enabled, float autoFlattrThreshold) {
if(autoFlattrThreshold < 0.0 || autoFlattrThreshold > 1.0) {
throw new IllegalArgumentException("Flattr threshold must be in range [0.0, 1.0]");
}
prefs.edit()
.putBoolean(PREF_AUTO_FLATTR, enabled)
.putFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, autoFlattrThreshold)
.apply();
}
public static boolean gpodnetNotificationsEnabled() {
return prefs.getBoolean(PREF_GPODNET_NOTIFICATIONS, true);
}
public static void setGpodnetNotificationsEnabled() {
prefs.edit()
.putBoolean(PREF_GPODNET_NOTIFICATIONS, true)
.apply();
}
public static void setHiddenDrawerItems(List<String> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_HIDDEN_DRAWER_ITEMS, str)
.apply();
}
public static void setCompactNotificationButtons(List<Integer> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_COMPACT_NOTIFICATION_BUTTONS, str)
.apply();
}
public static void setQueueLocked(boolean locked) {
prefs.edit()
.putBoolean(PREF_QUEUE_LOCKED, locked)
.apply();
}
private static int readThemeValue(String valueFromPrefs) {
switch (Integer.parseInt(valueFromPrefs)) {
case 0:
return R.style.Theme_AntennaPod_Light;
case 1:
return R.style.Theme_AntennaPod_Dark;
default:
return R.style.Theme_AntennaPod_Light;
}
}
private static long readUpdateInterval(String valueFromPrefs) {
int hours = Integer.parseInt(valueFromPrefs);
return TimeUnit.HOURS.toMillis(hours);
}
private static int readEpisodeCacheSizeInternal(String valueFromPrefs) {
if (valueFromPrefs.equals(context.getString(R.string.pref_episode_cache_unlimited))) {
return EPISODE_CACHE_SIZE_UNLIMITED;
} else {
return Integer.parseInt(valueFromPrefs);
}
}
private static String[] readPlaybackSpeedArray(String valueFromPrefs) {
String[] selectedSpeeds = null;
// If this preference hasn't been set yet, return the default options
if (valueFromPrefs == null) {
selectedSpeeds = new String[] { "1.00", "1.25", "1.50", "1.75", "2.00" };
} else {
try {
JSONArray jsonArray = new JSONArray(valueFromPrefs);
selectedSpeeds = new String[jsonArray.length()];
for (int i = 0; i < jsonArray.length(); i++) {
selectedSpeeds[i] = jsonArray.getString(i);
}
} catch (JSONException e) {
Log.e(TAG, "Got JSON error when trying to get speeds from JSONArray");
e.printStackTrace();
}
}
return selectedSpeeds;
}
public static boolean useSonic() {
return prefs.getBoolean(PREF_SONIC, false);
}
public static void enableSonic(boolean enable) {
prefs.edit()
.putBoolean(PREF_SONIC, enable)
.apply();
}
public static boolean stereoToMono() {
return prefs.getBoolean(PREF_STEREO_TO_MONO, false);
}
public static void stereoToMono(boolean enable) {
prefs.edit()
.putBoolean(PREF_STEREO_TO_MONO, enable)
.apply();
}
public static VideoBackgroundBehavior getVideoBackgroundBehavior() {
switch (prefs.getString(PREF_VIDEO_BEHAVIOR, "stop")) {
case "stop": return VideoBackgroundBehavior.STOP;
case "pip": return VideoBackgroundBehavior.PICTURE_IN_PICTURE;
case "continue": return VideoBackgroundBehavior.CONTINUE_PLAYING;
default: return VideoBackgroundBehavior.STOP;
}
}
public static EpisodeCleanupAlgorithm getEpisodeCleanupAlgorithm() {
int cleanupValue = Integer.parseInt(prefs.getString(PREF_EPISODE_CLEANUP, "-1"));
if (cleanupValue == EPISODE_CLEANUP_QUEUE) {
return new APQueueCleanupAlgorithm();
} else if (cleanupValue == EPISODE_CLEANUP_NULL) {
return new APNullCleanupAlgorithm();
} else {
return new APCleanupAlgorithm(cleanupValue);
}
}
/**
* Return the folder where the app stores all of its data. This method will
* return the standard data folder if none has been set by the user.
*
* @param type The name of the folder inside the data folder. May be null
* when accessing the root of the data folder.
* @return The data folder that has been requested or null if the folder
* could not be created.
*/
public static File getDataFolder(String type) {
String strDir = prefs.getString(PREF_DATA_FOLDER, null);
if (strDir == null) {
Log.d(TAG, "Using default data folder");
return context.getExternalFilesDir(type);
} else {
File dataDir = new File(strDir);
if (!dataDir.exists()) {
if (!dataDir.mkdir()) {
Log.w(TAG, "Could not create data folder");
return null;
}
}
if (type == null) {
return dataDir;
} else {
// handle path separators
String[] dirs = type.split("/");
for (int i = 0; i < dirs.length; i++) {
if (dirs.length > 0) {
if (i < dirs.length - 1) {
dataDir = getDataFolder(dirs[i]);
if (dataDir == null) {
return null;
}
}
type = dirs[i];
}
}
File typeDir = new File(dataDir, type);
if (!typeDir.exists()) {
if (dataDir.canWrite()) {
if (!typeDir.mkdir()) {
Log.e(TAG, "Could not create data folder named " + type);
return null;
}
}
}
return typeDir;
}
}
}
public static void setDataFolder(String dir) {
Log.d(TAG, "setDataFolder(dir: " + dir + ")");
prefs.edit()
.putString(PREF_DATA_FOLDER, dir)
.apply();
createImportDirectory();
}
/**
* Create a .nomedia file to prevent scanning by the media scanner.
*/
private static void createNoMediaFile() {
File f = new File(context.getExternalFilesDir(null), ".nomedia");
if (!f.exists()) {
try {
f.createNewFile();
} catch (IOException e) {
Log.e(TAG, "Could not create .nomedia file");
e.printStackTrace();
}
Log.d(TAG, ".nomedia file created");
}
}
/**
* Creates the import directory if it doesn't exist and if storage is
* available
*/
private static void createImportDirectory() {
File importDir = getDataFolder(IMPORT_DIR);
if (importDir != null) {
if (importDir.exists()) {
Log.d(TAG, "Import directory already exists");
} else {
Log.d(TAG, "Creating import directory");
importDir.mkdir();
}
} else {
Log.d(TAG, "Could not access external storage.");
}
}
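    /**
     * Re-schedules the automatic feed update alarm. If a time of day is configured
     * the alarm fires at that time, otherwise it fires after the configured
     * interval; when {@code now} is true the first interval-based update is
     * triggered roughly ten seconds from now.
     */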
public static void restartUpdateAlarm(boolean now) {
int[] timeOfDay = getUpdateTimeOfDay();
Log.d(TAG, "timeOfDay: " + Arrays.toString(timeOfDay));
if (timeOfDay.length == 2) {
restartUpdateTimeOfDayAlarm(timeOfDay[0], timeOfDay[1]);
} else {
long milliseconds = getUpdateInterval();
long startTrigger = milliseconds;
if (now) {
startTrigger = TimeUnit.SECONDS.toMillis(10);
}
restartUpdateIntervalAlarm(startTrigger, milliseconds);
}
}
/**
* Sets the interval in which the feeds are refreshed automatically
*/
private static void restartUpdateIntervalAlarm(long triggerAtMillis, long intervalMillis) {
Log.d(TAG, "Restarting update alarm.");
AlarmManager alarmManager = (AlarmManager) context.getSystemService(Context.ALARM_SERVICE);
Intent intent = new Intent(context, FeedUpdateReceiver.class);
PendingIntent updateIntent = PendingIntent.getBroadcast(context, 0, intent, 0);
alarmManager.cancel(updateIntent);
if (intervalMillis > 0) {
alarmManager.set(AlarmManager.ELAPSED_REALTIME_WAKEUP,
SystemClock.elapsedRealtime() + triggerAtMillis,
updateIntent);
Log.d(TAG, "Changed alarm to new interval " + TimeUnit.MILLISECONDS.toHours(intervalMillis) + " h");
} else {
Log.d(TAG, "Automatic update was deactivated");
}
}
/**
* Sets time of day the feeds are refreshed automatically
*/
private static void restartUpdateTimeOfDayAlarm(int hoursOfDay, int minute) {
Log.d(TAG, "Restarting update alarm.");
AlarmManager alarmManager = (AlarmManager) context.getSystemService(Context.ALARM_SERVICE);
PendingIntent updateIntent = PendingIntent.getBroadcast(context, 0,
new Intent(context, FeedUpdateReceiver.class), 0);
alarmManager.cancel(updateIntent);
Calendar now = Calendar.getInstance();
Calendar alarm = (Calendar)now.clone();
alarm.set(Calendar.HOUR_OF_DAY, hoursOfDay);
alarm.set(Calendar.MINUTE, minute);
if (alarm.before(now) || alarm.equals(now)) {
alarm.add(Calendar.DATE, 1);
}
Log.d(TAG, "Alarm set for: " + alarm.toString() + " : " + alarm.getTimeInMillis());
alarmManager.set(AlarmManager.RTC_WAKEUP,
alarm.getTimeInMillis(),
updateIntent);
Log.d(TAG, "Changed alarm to new time of day " + hoursOfDay + ":" + minute);
}
/**
* Reads episode cache size as it is saved in the episode_cache_size_values array.
*/
public static int readEpisodeCacheSize(String valueFromPrefs) {
return readEpisodeCacheSizeInternal(valueFromPrefs);
}
/**
* Evaluates whether Cast support (Chromecast, Audio Cast, etc) is enabled on the preferences.
*/
public static boolean isCastEnabled() {
return prefs.getBoolean(PREF_CAST_ENABLED, false);
}
public enum VideoBackgroundBehavior {
STOP, PICTURE_IN_PICTURE, CONTINUE_PLAYING
}
}
| 1 | 13,828 | this method knows too much - it is kind of a strange side effect I'd prefer if we had separate method for acknowledging that the onboarding was done and should not be shown again | AntennaPod-AntennaPod | java |
@@ -125,6 +125,7 @@ public class StorageCallbacksImpl implements StorageCallbacks {
PodDBAdapter.KEY_CHAPTER_TYPE));
}
if(oldVersion <= 14) {
+
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " INTEGER");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS | 1 | package de.danoeh.antennapod.config;
import android.content.ContentValues;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.util.Log;
import de.danoeh.antennapod.core.StorageCallbacks;
import de.danoeh.antennapod.core.storage.PodDBAdapter;
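/**
 * Reports the app's database schema version and applies the incremental SQL
 * migrations needed to upgrade older databases to it.
 */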
public class StorageCallbacksImpl implements StorageCallbacks {
@Override
public int getDatabaseVersion() {
return 15;
}
@Override
public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
Log.w("DBAdapter", "Upgrading from version " + oldVersion + " to "
+ newVersion + ".");
if (oldVersion <= 1) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_TYPE + " TEXT");
}
if (oldVersion <= 2) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_LINK + " TEXT");
}
if (oldVersion <= 3) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_ITEM_IDENTIFIER + " TEXT");
}
if (oldVersion <= 4) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_FEED_IDENTIFIER + " TEXT");
}
if (oldVersion <= 5) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_REASON_DETAILED + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_DOWNLOADSTATUS_TITLE + " TEXT");
}
if (oldVersion <= 6) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_CHAPTER_TYPE + " INTEGER");
}
if (oldVersion <= 7) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYBACK_COMPLETION_DATE
+ " INTEGER");
}
if (oldVersion <= 8) {
final int KEY_ID_POSITION = 0;
final int KEY_MEDIA_POSITION = 1;
// Add feeditem column to feedmedia table
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_FEEDITEM
+ " INTEGER");
            // select only items that actually reference a media entry; binding the
            // column name as a selection argument would compare against the literal
            // string instead of the column value
            Cursor feeditemCursor = db.query(PodDBAdapter.TABLE_NAME_FEED_ITEMS,
                    new String[]{PodDBAdapter.KEY_ID, PodDBAdapter.KEY_MEDIA},
                    PodDBAdapter.KEY_MEDIA + " > 0", null, null, null, null);
if (feeditemCursor.moveToFirst()) {
db.beginTransaction();
ContentValues contentValues = new ContentValues();
do {
long mediaId = feeditemCursor.getLong(KEY_MEDIA_POSITION);
contentValues.put(PodDBAdapter.KEY_FEEDITEM, feeditemCursor.getLong(KEY_ID_POSITION));
db.update(PodDBAdapter.TABLE_NAME_FEED_MEDIA, contentValues, PodDBAdapter.KEY_ID + "=?", new String[]{String.valueOf(mediaId)});
contentValues.clear();
} while (feeditemCursor.moveToNext());
db.setTransactionSuccessful();
db.endTransaction();
}
feeditemCursor.close();
}
if (oldVersion <= 9) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " INTEGER DEFAULT 1");
}
if (oldVersion <= 10) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FLATTR_STATUS
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_FLATTR_STATUS
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYED_DURATION
+ " INTEGER");
}
if (oldVersion <= 11) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_USERNAME
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_PASSWORD
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE
+ " INTEGER");
}
if (oldVersion <= 12) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_IS_PAGED + " INTEGER DEFAULT 0");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_NEXT_PAGE_LINK + " TEXT");
}
if (oldVersion <= 13) {
// remove duplicate rows in "Chapters" table that were created because of a bug.
db.execSQL(String.format("DELETE FROM %s WHERE %s NOT IN " +
"(SELECT MIN(%s) as %s FROM %s GROUP BY %s,%s,%s,%s,%s)",
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_TITLE,
PodDBAdapter.KEY_START,
PodDBAdapter.KEY_FEEDITEM,
PodDBAdapter.KEY_LINK,
PodDBAdapter.KEY_CHAPTER_TYPE));
}
if(oldVersion <= 14) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " INTEGER");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " = "
+ "(SELECT " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " FROM " + PodDBAdapter.TABLE_NAME_FEEDS
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEEDS + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_FEED + ")");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_HIDE + " TEXT");
}
}
}
| 1 | 12,138 | Do we need to increase the DB version? Also, should probably be a constant, no? | AntennaPod-AntennaPod | java |
@@ -50,6 +50,11 @@ func NewWindow(every, period, offset values.Duration) (Window, error) {
return w, nil
}
+// IsZero checks if the window's every duration is zero
+func (w Window) IsZero() bool {
+ return w.every.IsZero()
+}
+
func (w Window) isValid() error {
if w.every.IsZero() {
return errors.New(codes.Invalid, "duration used as an interval cannot be zero") | 1 | package interval
import (
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/values"
)
const epoch = values.Time(0)
var epochYear, epochMonth int64
func init() {
ts := epoch.Time()
y, m, _ := ts.Date()
epochYear = int64(y)
epochMonth = int64(m - 1)
}
// TODO(nathanielc): Make the epoch a parameter to the window
// See https://github.com/influxdata/flux/issues/2093
//
// Window is a description of an infinite set of boundaries in time.
type Window struct {
// The ith window start is expressed via this equation:
// window_start_i = zero + every * i
// window_stop_i = zero + every * i + period
every values.Duration
period values.Duration
zero values.Time
zeroMonths int64
}
// NewWindow creates a window which can be used to determine the boundaries for a given point.
// Window boundaries start at the epoch plus the offset.
// Each subsequent window starts at a multiple of the every duration.
// Each window's length is the start boundary plus the period.
// Every must not be a mix of months and nanoseconds in order to preserve constant time bounds lookup.
func NewWindow(every, period, offset values.Duration) (Window, error) {
zero := epoch.Add(offset)
w := Window{
every: every,
period: period,
zero: zero,
zeroMonths: monthsSince(zero),
}
if err := w.isValid(); err != nil {
return Window{}, err
}
return w, nil
}
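// A worked example of the bounds equation above (illustrative values only, not
// taken from the original package): with every = 1h, period = 1h and offset = 0
// the zero time is the epoch, so window i covers [i*1h, i*1h+1h) and
// GetLatestBounds(01:30) yields [01:00, 02:00). With offset = 30m the starts
// shift to 00:30, 01:30, ... and the same lookup yields [01:30, 02:30).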
func (w Window) isValid() error {
if w.every.IsZero() {
return errors.New(codes.Invalid, "duration used as an interval cannot be zero")
}
if w.every.IsMixed() {
const docURL = "https://v2.docs.influxdata.com/v2.0/reference/flux/stdlib/built-in/transformations/window/#calendar-months-and-years"
return errors.New(codes.Invalid, "duration used as an interval cannot mix month and nanosecond units").
WithDocURL(docURL)
}
if w.every.IsNegative() {
return errors.New(codes.Invalid, "duration used as an interval cannot be negative")
}
return nil
}
// GetLatestBounds returns the bounds for the latest window bounds that contains the given time t.
// For underlapping windows that do not contain time t, the window directly before time t will be returned.
func (w Window) GetLatestBounds(t values.Time) Bounds {
// Get the latest index that should contain the time t
index := w.lastIndex(t)
// Construct the bounds from the index
start := w.zero.Add(w.every.Mul(index))
b := Bounds{
start: start,
stop: start.Add(w.period),
index: index,
}
	// If the period is negative, it's possible that future bounds can still contain this point
if w.period.IsNegative() {
// swap start and stop since the period was negative
b.start, b.stop = b.stop, b.start
// If period is NOT mixed we can do a direct calculation
// to determine how far into the future a bounds may be found.
if !w.period.IsMixed() {
// Since its not mixed we can adjust the index closer based
// on how many windows a period can span
var period, every int64
if w.every.MonthsOnly() {
every = w.every.Months()
period = w.period.Months()
} else {
every = w.every.Nanoseconds()
period = w.period.Nanoseconds()
}
if period > every {
indexDelta := period / every
index += int(indexDelta)
}
}
// Now do a direct search
next := w.NextBounds(b)
for next.Contains(t) {
b = next
next = w.NextBounds(next)
}
}
return b
}
// GetOverlappingBounds returns a slice of bounds that overlaps the input bounds.
// The returned set of bounds are ordered by decreasing time.
func (w Window) GetOverlappingBounds(start, stop values.Time) []Bounds {
bounds := Bounds{
start: start,
stop: stop,
}
if bounds.IsEmpty() {
return []Bounds{}
}
// Estimate the number of windows by using a rough approximation.
count := (bounds.Length().Duration() / w.every.Duration()) + (w.period.Duration() / w.every.Duration())
bs := make([]Bounds, 0, count)
curr := w.GetLatestBounds(stop)
for curr.stop > start {
if curr.Overlaps(bounds) {
bs = append(bs, curr)
}
curr = w.PrevBounds(curr)
}
return bs
}
// NextBounds returns the next boundary in sequence from the given boundary.
func (w Window) NextBounds(b Bounds) Bounds {
index := b.index + 1
start := w.zero.Add(w.every.Mul(index))
stop := start.Add(w.period)
if w.period.IsNegative() {
start, stop = stop, start
}
return Bounds{
start: start,
stop: stop,
index: index,
}
}
// PrevBounds returns the previous boundary in sequence from the given boundary.
func (w Window) PrevBounds(b Bounds) Bounds {
index := b.index - 1
start := w.zero.Add(w.every.Mul(index))
stop := start.Add(w.period)
if w.period.IsNegative() {
start, stop = stop, start
}
return Bounds{
start: start,
stop: stop,
index: index,
}
}
// lastIndex will compute the index of the last bounds to contain t
func (w Window) lastIndex(t values.Time) int {
// We treat both nanoseconds and months as the space of whole numbers (aka integers).
// This keeps the math the same once we transform into the correct space.
// For months, we operate in the number of months since the epoch.
// For nanoseconds, we operate in the number of nanoseconds since the epoch.
if w.every.MonthsOnly() {
target := monthsSince(t)
// Check if the target day and time of the month is before the zero day and time of the month.
// If it is, that means that in _months_ space we are really in the previous month.
if isBeforeWithinMonth(t, w.zero) {
target -= 1
}
return lastIndex(w.zeroMonths, target, w.every.Months())
}
return lastIndex(int64(w.zero), int64(t), w.every.Nanoseconds())
}
// lastIndex computes the index where zero + every * index ≤ target
// The zero, target and every values can be in any units so long as they are consistent and zero based.
func lastIndex(zero, target, every int64) int {
// Given
// zero + every * index ≤ target
// Therefore
// index ≤ (target - zero) / every
// We want to find the most positive index where the above is true
// Example: Positive Index
// zero = 3 target = 14 every = 5
// Number line with window starts marked:
// -2 -1 0 1 2 |3 4 5 6 7 |8 9 10 11 12 |13 14 15 16 17
// 0 1 2
// We can see that the index we want is 2
// (target - zero) /every
// = (14 - 3) / 5
// = 11 / 5
// = 2
// We do not adjust because the delta was positive
// Example: Positive Index on boundary
// zero = 3 target = 13 every = 5
// Number line with window starts marked:
// -2 -1 0 1 2 |3 4 5 6 7 |8 9 10 11 12 |13 14 15 16 17
// 0 1 2
// We can see that the index we want is 2
// (target - zero) /every
// = (13 - 3) / 5
// = 10 / 5
// = 2
// We do not adjust because the delta was positive
// Example: Negative Index
// zero = 3 target = -9 every = 5
// Number line with window starts marked:
// |-12 -11 -10 -9 -8 |-7 -6 -5 -4 -3 |-2 -1 0 1 2 |3 4 5 6 7
// -3 -2 -1 0
// We can see that the index we want is -3
// (target - zero) /every
// = (-9 - 3) / 5
// = -12 / 5
// = -2
// We have to adjust by 1 because the delta was negative
// and we get -3
// Example: Negative Index on boundary
// zero = 3 target = -7 every = 5
// Number line with window starts marked:
// |-12 -11 -10 -9 -8 |-7 -6 -5 -4 -3 |-2 -1 0 1 2 |3 4 5 6 7
// -3 -2 -1 0
// We can see that the index we want is -2
// (target - zero) /every
// = (-7 - 3) / 5
// = -10 / 5
// = -2
// This time we land right on the boundary, since we are lower inclusive
// we do not need to adjust.
delta := target - zero
index := delta / every
// For targets before the zero we need to adjust the index,
// but only if we did not land right on the boundary.
if delta < 0 && delta%every != 0 {
index -= 1
}
return int(index)
}
// monthsSince converts a time into the number of months since the unix epoch
func monthsSince(t values.Time) int64 {
ts := t.Time()
year, month, _ := ts.Date()
return (int64(year)-epochYear)*12 + int64(month-1) - epochMonth
}
// isBeforeWithinMonth reports whether a comes before b within the month.
// The year and month of a and b are not relevant.
func isBeforeWithinMonth(a, b values.Time) bool {
at := a.Time()
bt := b.Time()
ad := at.Day()
bd := bt.Day()
if ad > bd {
return false
}
if ad < bd {
return true
}
ah, am, as := at.Clock()
bh, bm, bs := bt.Clock()
if ah > bh {
return false
}
if ah < bh {
return true
}
if am > bm {
return false
}
if am < bm {
return true
}
if as > bs {
return false
}
if as < bs {
return true
}
an := at.Nanosecond()
bn := bt.Nanosecond()
if an > bn {
return false
}
if an < bn {
return true
}
return false
}
// TODO: Add tests very far away from the epoch
| 1 | 15,567 | We could utilize the new isZero method in this if-statement, right? | influxdata-flux | go |
@@ -107,6 +107,14 @@ func (config testBlockOpsConfig) blockCache() BlockCache {
return config.cache
}
+func (config testBlockOpsConfig) MakeLogger(module string) logger.Logger {
+ return logger.NewNull()
+}
+
+func (config testBlockOpsConfig) DataVersion() DataVer {
+ return FilesWithHolesDataVer
+}
+
func makeTestBlockOpsConfig(t *testing.T) testBlockOpsConfig {
bserver := NewBlockServerMemory(logger.NewTestLogger(t))
codec := kbfscodec.NewMsgpack() | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"sync"
"testing"
"github.com/golang/mock/gomock"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
// fakeKeyMetadata is an implementation of KeyMetadata that just
// stores TLFCryptKeys directly. It's meant to be used with
// fakeBlockKeyGetter.
type fakeKeyMetadata struct {
// Embed a KeyMetadata that's always empty, so that all
// methods besides TlfID() panic.
KeyMetadata
tlfID tlf.ID
keys []kbfscrypto.TLFCryptKey
}
var _ KeyMetadata = fakeKeyMetadata{}
// makeFakeKeyMetadata returns a fakeKeyMetadata with keys for each
// KeyGen up to latestKeyGen. The key for KeyGen i is a deterministic
// function of i, so multiple calls to this function will have the
// same keys.
func makeFakeKeyMetadata(tlfID tlf.ID, latestKeyGen KeyGen) fakeKeyMetadata {
keys := make([]kbfscrypto.TLFCryptKey, 0,
latestKeyGen-FirstValidKeyGen+1)
for keyGen := FirstValidKeyGen; keyGen <= latestKeyGen; keyGen++ {
keys = append(keys,
kbfscrypto.MakeTLFCryptKey([32]byte{byte(keyGen)}))
}
return fakeKeyMetadata{nil, tlfID, keys}
}
func (kmd fakeKeyMetadata) TlfID() tlf.ID {
return kmd.tlfID
}
type fakeBlockKeyGetter struct{}
func (kg fakeBlockKeyGetter) GetTLFCryptKeyForEncryption(
ctx context.Context, kmd KeyMetadata) (kbfscrypto.TLFCryptKey, error) {
fkmd := kmd.(fakeKeyMetadata)
if len(fkmd.keys) == 0 {
return kbfscrypto.TLFCryptKey{}, errors.New(
"no keys for encryption")
}
return fkmd.keys[len(fkmd.keys)-1], nil
}
func (kg fakeBlockKeyGetter) GetTLFCryptKeyForBlockDecryption(
ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer) (
kbfscrypto.TLFCryptKey, error) {
fkmd := kmd.(fakeKeyMetadata)
i := int(blockPtr.KeyGen - FirstValidKeyGen)
if i >= len(fkmd.keys) {
return kbfscrypto.TLFCryptKey{}, errors.Errorf(
"no key for block decryption (keygen=%d)",
blockPtr.KeyGen)
}
return fkmd.keys[i], nil
}
type testBlockOpsConfig struct {
bserver BlockServer
testCodec kbfscodec.Codec
cryptoPure cryptoPure
cache BlockCache
}
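// Compile-time assertion that testBlockOpsConfig satisfies the blockOpsConfig
// interface.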
var _ blockOpsConfig = (*testBlockOpsConfig)(nil)
func (config testBlockOpsConfig) blockServer() BlockServer {
return config.bserver
}
func (config testBlockOpsConfig) codec() kbfscodec.Codec {
return config.testCodec
}
func (config testBlockOpsConfig) crypto() cryptoPure {
return config.cryptoPure
}
func (config testBlockOpsConfig) keyGetter() blockKeyGetter {
return fakeBlockKeyGetter{}
}
func (config testBlockOpsConfig) blockCache() BlockCache {
return config.cache
}
func makeTestBlockOpsConfig(t *testing.T) testBlockOpsConfig {
bserver := NewBlockServerMemory(logger.NewTestLogger(t))
codec := kbfscodec.NewMsgpack()
crypto := MakeCryptoCommon(codec)
cache := NewBlockCacheStandard(10, getDefaultCleanBlockCacheCapacity())
return testBlockOpsConfig{bserver, codec, crypto, cache}
}
// TestBlockOpsReadySuccess checks that BlockOpsStandard.Ready()
// encrypts its given block properly.
func TestBlockOpsReadySuccess(t *testing.T) {
config := makeTestBlockOpsConfig(t)
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
var latestKeyGen KeyGen = 5
kmd := makeFakeKeyMetadata(tlfID, latestKeyGen)
block := FileBlock{
Contents: []byte{1, 2, 3, 4, 5},
}
encodedBlock, err := config.testCodec.Encode(block)
require.NoError(t, err)
ctx := context.Background()
id, plainSize, readyBlockData, err := bops.Ready(ctx, kmd, &block)
require.NoError(t, err)
require.Equal(t, len(encodedBlock), plainSize)
err = kbfsblock.VerifyID(readyBlockData.buf, id)
require.NoError(t, err)
var encryptedBlock EncryptedBlock
err = config.testCodec.Decode(readyBlockData.buf, &encryptedBlock)
require.NoError(t, err)
blockCryptKey := kbfscrypto.UnmaskBlockCryptKey(
readyBlockData.serverHalf,
kmd.keys[latestKeyGen-FirstValidKeyGen])
var decryptedBlock FileBlock
err = config.cryptoPure.DecryptBlock(
encryptedBlock, blockCryptKey, &decryptedBlock)
require.NoError(t, err)
decryptedBlock.SetEncodedSize(uint32(readyBlockData.GetEncodedSize()))
require.Equal(t, block, decryptedBlock)
}
// TestBlockOpsReadyFailKeyGet checks that BlockOpsStandard.Ready()
// fails properly if we fail to retrieve the key.
func TestBlockOpsReadyFailKeyGet(t *testing.T) {
config := makeTestBlockOpsConfig(t)
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
kmd := makeFakeKeyMetadata(tlfID, 0)
ctx := context.Background()
_, _, _, err := bops.Ready(ctx, kmd, &FileBlock{})
require.EqualError(t, err, "no keys for encryption")
}
type badServerHalfMaker struct {
cryptoPure
}
func (c badServerHalfMaker) MakeRandomBlockCryptKeyServerHalf() (
kbfscrypto.BlockCryptKeyServerHalf, error) {
return kbfscrypto.BlockCryptKeyServerHalf{}, errors.New(
"could not make server half")
}
// TestBlockOpsReadyFailServerHalfGet checks that BlockOpsStandard.Ready()
// fails properly if we fail to generate a server half.
func TestBlockOpsReadyFailServerHalfGet(t *testing.T) {
config := makeTestBlockOpsConfig(t)
config.cryptoPure = badServerHalfMaker{config.cryptoPure}
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
kmd := makeFakeKeyMetadata(tlfID, FirstValidKeyGen)
ctx := context.Background()
_, _, _, err := bops.Ready(ctx, kmd, &FileBlock{})
require.EqualError(t, err, "could not make server half")
}
type badBlockEncryptor struct {
cryptoPure
}
func (c badBlockEncryptor) EncryptBlock(
block Block, key kbfscrypto.BlockCryptKey) (
plainSize int, encryptedBlock EncryptedBlock, err error) {
return 0, EncryptedBlock{}, errors.New("could not encrypt block")
}
// TestBlockOpsReadyFailEncryption checks that BlockOpsStandard.Ready()
// fails properly if we fail to encrypt the block.
func TestBlockOpsReadyFailEncryption(t *testing.T) {
config := makeTestBlockOpsConfig(t)
config.cryptoPure = badBlockEncryptor{config.cryptoPure}
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
kmd := makeFakeKeyMetadata(tlfID, FirstValidKeyGen)
ctx := context.Background()
_, _, _, err := bops.Ready(ctx, kmd, &FileBlock{})
require.EqualError(t, err, "could not encrypt block")
}
type tooSmallBlockEncryptor struct {
CryptoCommon
}
func (c tooSmallBlockEncryptor) EncryptBlock(
block Block, key kbfscrypto.BlockCryptKey) (
plainSize int, encryptedBlock EncryptedBlock, err error) {
plainSize, encryptedBlock, err = c.CryptoCommon.EncryptBlock(block, key)
if err != nil {
return 0, EncryptedBlock{}, err
}
encryptedBlock.EncryptedData = nil
return plainSize, encryptedBlock, nil
}
type badEncoder struct {
kbfscodec.Codec
}
func (c badEncoder) Encode(o interface{}) ([]byte, error) {
return nil, errors.New("could not encode")
}
// TestBlockOpsReadyFailEncode checks that BlockOpsStandard.Ready()
// fails properly if we fail to encode the encrypted block.
func TestBlockOpsReadyFailEncode(t *testing.T) {
config := makeTestBlockOpsConfig(t)
config.testCodec = badEncoder{config.testCodec}
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
kmd := makeFakeKeyMetadata(tlfID, FirstValidKeyGen)
ctx := context.Background()
_, _, _, err := bops.Ready(ctx, kmd, &FileBlock{})
require.EqualError(t, err, "could not encode")
}
type tooSmallEncoder struct {
kbfscodec.Codec
}
func (c tooSmallEncoder) Encode(o interface{}) ([]byte, error) {
return []byte{0x1}, nil
}
// TestBlockOpsReadyTooSmallEncode checks that
// BlockOpsStandard.Ready() fails properly if the encrypted block
// encodes to a too-small buffer.
func TestBlockOpsReadyTooSmallEncode(t *testing.T) {
config := makeTestBlockOpsConfig(t)
config.testCodec = tooSmallEncoder{config.testCodec}
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
kmd := makeFakeKeyMetadata(tlfID, FirstValidKeyGen)
ctx := context.Background()
_, _, _, err := bops.Ready(ctx, kmd, &FileBlock{})
require.IsType(t, TooLowByteCountError{}, err)
}
// TestBlockOpsReadySuccess checks that BlockOpsStandard.Get()
// retrieves a block properly, even if that block was encoded for a
// previous key generation.
func TestBlockOpsGetSuccess(t *testing.T) {
config := makeTestBlockOpsConfig(t)
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
var keyGen KeyGen = 3
kmd1 := makeFakeKeyMetadata(tlfID, keyGen)
block := FileBlock{
Contents: []byte{1, 2, 3, 4, 5},
}
ctx := context.Background()
id, _, readyBlockData, err := bops.Ready(ctx, kmd1, &block)
require.NoError(t, err)
bCtx := kbfsblock.MakeFirstContext(keybase1.MakeTestUID(1))
err = config.bserver.Put(ctx, tlfID, id, bCtx,
readyBlockData.buf, readyBlockData.serverHalf)
require.NoError(t, err)
kmd2 := makeFakeKeyMetadata(tlfID, keyGen+3)
var decryptedBlock FileBlock
err = bops.Get(ctx, kmd2,
BlockPointer{ID: id, KeyGen: keyGen, Context: bCtx},
&decryptedBlock, NoCacheEntry)
require.NoError(t, err)
require.Equal(t, block, decryptedBlock)
}
// TestBlockOpsReadySuccess checks that BlockOpsStandard.Get() fails
// if it can't retrieve the block from the server.
func TestBlockOpsGetFailServerGet(t *testing.T) {
config := makeTestBlockOpsConfig(t)
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
var latestKeyGen KeyGen = 5
kmd := makeFakeKeyMetadata(tlfID, latestKeyGen)
ctx := context.Background()
id, _, _, err := bops.Ready(ctx, kmd, &FileBlock{})
require.NoError(t, err)
bCtx := kbfsblock.MakeFirstContext(keybase1.MakeTestUID(1))
var decryptedBlock FileBlock
err = bops.Get(ctx, kmd,
BlockPointer{ID: id, KeyGen: latestKeyGen, Context: bCtx},
&decryptedBlock, NoCacheEntry)
require.IsType(t, kbfsblock.BServerErrorBlockNonExistent{}, err)
}
type badGetBlockServer struct {
BlockServer
}
func (bserver badGetBlockServer) Get(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context) (
[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
buf, serverHalf, err := bserver.BlockServer.Get(ctx, tlfID, id, context)
if err != nil {
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, nil
}
return append(buf, 0x1), serverHalf, nil
}
// TestBlockOpsReadyFailVerify checks that BlockOpsStandard.Get()
// fails if it can't verify the block retrieved from the server.
func TestBlockOpsGetFailVerify(t *testing.T) {
config := makeTestBlockOpsConfig(t)
config.bserver = badGetBlockServer{config.bserver}
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
var latestKeyGen KeyGen = 5
kmd := makeFakeKeyMetadata(tlfID, latestKeyGen)
ctx := context.Background()
id, _, readyBlockData, err := bops.Ready(ctx, kmd, &FileBlock{})
require.NoError(t, err)
bCtx := kbfsblock.MakeFirstContext(keybase1.MakeTestUID(1))
err = config.bserver.Put(ctx, tlfID, id, bCtx,
readyBlockData.buf, readyBlockData.serverHalf)
require.NoError(t, err)
var decryptedBlock FileBlock
err = bops.Get(ctx, kmd,
BlockPointer{ID: id, KeyGen: latestKeyGen, Context: bCtx},
&decryptedBlock, NoCacheEntry)
require.IsType(t, kbfshash.HashMismatchError{}, errors.Cause(err))
}
// TestBlockOpsReadyFailKeyGet checks that BlockOpsStandard.Get()
// fails if it can't get the decryption key.
func TestBlockOpsGetFailKeyGet(t *testing.T) {
config := makeTestBlockOpsConfig(t)
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
var latestKeyGen KeyGen = 5
kmd := makeFakeKeyMetadata(tlfID, latestKeyGen)
ctx := context.Background()
id, _, readyBlockData, err := bops.Ready(ctx, kmd, &FileBlock{})
require.NoError(t, err)
bCtx := kbfsblock.MakeFirstContext(keybase1.MakeTestUID(1))
err = config.bserver.Put(ctx, tlfID, id, bCtx,
readyBlockData.buf, readyBlockData.serverHalf)
require.NoError(t, err)
var decryptedBlock FileBlock
err = bops.Get(ctx, kmd,
BlockPointer{ID: id, KeyGen: latestKeyGen + 1, Context: bCtx},
&decryptedBlock, NoCacheEntry)
require.EqualError(t, err, fmt.Sprintf(
"no key for block decryption (keygen=%d)", latestKeyGen+1))
}
// badDecoder maintains a map from stringified byte buffers to
// error. If Decode is called with a buffer that matches anything in
// the map, the corresponding error is returned.
//
// This is necessary because codec functions are used everywhere.
type badDecoder struct {
kbfscodec.Codec
errorsLock sync.RWMutex
errors map[string]error
}
func (c *badDecoder) putError(buf []byte, err error) {
k := string(buf)
c.errorsLock.Lock()
defer c.errorsLock.Unlock()
c.errors[k] = err
}
func (c *badDecoder) Decode(buf []byte, o interface{}) error {
k := string(buf)
err := func() error {
c.errorsLock.RLock()
defer c.errorsLock.RUnlock()
return c.errors[k]
}()
if err != nil {
return err
}
return c.Codec.Decode(buf, o)
}
// TestBlockOpsReadyFailDecode checks that BlockOpsStandard.Get()
// fails if it can't decode the encrypted block.
func TestBlockOpsGetFailDecode(t *testing.T) {
config := makeTestBlockOpsConfig(t)
badDecoder := badDecoder{
Codec: config.testCodec,
errors: make(map[string]error),
}
config.testCodec = &badDecoder
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
var latestKeyGen KeyGen = 5
kmd := makeFakeKeyMetadata(tlfID, latestKeyGen)
ctx := context.Background()
id, _, readyBlockData, err := bops.Ready(ctx, kmd, &FileBlock{})
require.NoError(t, err)
decodeErr := errors.New("could not decode")
badDecoder.putError(readyBlockData.buf, decodeErr)
bCtx := kbfsblock.MakeFirstContext(keybase1.MakeTestUID(1))
err = config.bserver.Put(ctx, tlfID, id, bCtx,
readyBlockData.buf, readyBlockData.serverHalf)
require.NoError(t, err)
var decryptedBlock FileBlock
err = bops.Get(ctx, kmd,
BlockPointer{ID: id, KeyGen: latestKeyGen, Context: bCtx},
&decryptedBlock, NoCacheEntry)
require.Equal(t, decodeErr, err)
}
type badBlockDecryptor struct {
cryptoPure
}
func (c badBlockDecryptor) DecryptBlock(encryptedBlock EncryptedBlock,
key kbfscrypto.BlockCryptKey, block Block) error {
return errors.New("could not decrypt block")
}
// TestBlockOpsReadyFailDecrypt checks that BlockOpsStandard.Get()
// fails if it can't decrypt the encrypted block.
func TestBlockOpsGetFailDecrypt(t *testing.T) {
config := makeTestBlockOpsConfig(t)
config.cryptoPure = badBlockDecryptor{config.cryptoPure}
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
tlfID := tlf.FakeID(0, false)
var latestKeyGen KeyGen = 5
kmd := makeFakeKeyMetadata(tlfID, latestKeyGen)
ctx := context.Background()
id, _, readyBlockData, err := bops.Ready(ctx, kmd, &FileBlock{})
require.NoError(t, err)
bCtx := kbfsblock.MakeFirstContext(keybase1.MakeTestUID(1))
err = config.bserver.Put(ctx, tlfID, id, bCtx,
readyBlockData.buf, readyBlockData.serverHalf)
require.NoError(t, err)
var decryptedBlock FileBlock
err = bops.Get(ctx, kmd,
BlockPointer{ID: id, KeyGen: latestKeyGen, Context: bCtx},
&decryptedBlock, NoCacheEntry)
require.EqualError(t, err, "could not decrypt block")
}
func TestBlockOpsDeleteSuccess(t *testing.T) {
ctr := NewSafeTestReporter(t)
mockCtrl := gomock.NewController(ctr)
defer mockCtrl.Finish()
bserver := NewMockBlockServer(mockCtrl)
config := makeTestBlockOpsConfig(t)
config.bserver = bserver
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
// Expect one call to delete several blocks.
b1 := BlockPointer{ID: kbfsblock.FakeID(1)}
b2 := BlockPointer{ID: kbfsblock.FakeID(2)}
contexts := kbfsblock.ContextMap{
b1.ID: {b1.Context},
b2.ID: {b2.Context},
}
expectedLiveCounts := map[kbfsblock.ID]int{
b1.ID: 5,
b2.ID: 3,
}
ctx := context.Background()
tlfID := tlf.FakeID(1, false)
bserver.EXPECT().RemoveBlockReferences(ctx, tlfID, contexts).
Return(expectedLiveCounts, nil)
liveCounts, err := bops.Delete(ctx, tlfID, []BlockPointer{b1, b2})
require.NoError(t, err)
require.Equal(t, expectedLiveCounts, liveCounts)
}
func TestBlockOpsDeleteFail(t *testing.T) {
ctr := NewSafeTestReporter(t)
mockCtrl := gomock.NewController(ctr)
defer mockCtrl.Finish()
bserver := NewMockBlockServer(mockCtrl)
config := makeTestBlockOpsConfig(t)
config.bserver = bserver
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
b1 := BlockPointer{ID: kbfsblock.FakeID(1)}
b2 := BlockPointer{ID: kbfsblock.FakeID(2)}
contexts := kbfsblock.ContextMap{
b1.ID: {b1.Context},
b2.ID: {b2.Context},
}
// Fail the delete call.
ctx := context.Background()
tlfID := tlf.FakeID(1, false)
expectedErr := errors.New("Fake fail")
bserver.EXPECT().RemoveBlockReferences(ctx, tlfID, contexts).
Return(nil, expectedErr)
_, err := bops.Delete(ctx, tlfID, []BlockPointer{b1, b2})
require.Equal(t, expectedErr, err)
}
func TestBlockOpsArchiveSuccess(t *testing.T) {
ctr := NewSafeTestReporter(t)
mockCtrl := gomock.NewController(ctr)
defer func() {
ctr.CheckForFailures()
mockCtrl.Finish()
}()
bserver := NewMockBlockServer(mockCtrl)
config := makeTestBlockOpsConfig(t)
config.bserver = bserver
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
// Expect one call to archive several blocks.
b1 := BlockPointer{ID: kbfsblock.FakeID(1)}
b2 := BlockPointer{ID: kbfsblock.FakeID(2)}
contexts := kbfsblock.ContextMap{
b1.ID: {b1.Context},
b2.ID: {b2.Context},
}
ctx := context.Background()
tlfID := tlf.FakeID(1, false)
bserver.EXPECT().ArchiveBlockReferences(ctx, tlfID, contexts).
Return(nil)
err := bops.Archive(ctx, tlfID, []BlockPointer{b1, b2})
require.NoError(t, err)
}
func TestBlockOpsArchiveFail(t *testing.T) {
ctr := NewSafeTestReporter(t)
mockCtrl := gomock.NewController(ctr)
defer func() {
ctr.CheckForFailures()
mockCtrl.Finish()
}()
bserver := NewMockBlockServer(mockCtrl)
config := makeTestBlockOpsConfig(t)
config.bserver = bserver
bops := NewBlockOpsStandard(config, testBlockRetrievalWorkerQueueSize)
defer bops.Shutdown()
b1 := BlockPointer{ID: kbfsblock.FakeID(1)}
b2 := BlockPointer{ID: kbfsblock.FakeID(2)}
contexts := kbfsblock.ContextMap{
b1.ID: {b1.Context},
b2.ID: {b2.Context},
}
// Fail the archive call.
ctx := context.Background()
tlfID := tlf.FakeID(1, false)
expectedErr := errors.New("Fake fail")
bserver.EXPECT().ArchiveBlockReferences(ctx, tlfID, contexts).
Return(expectedErr)
err := bops.Archive(ctx, tlfID, []BlockPointer{b1, b2})
require.Equal(t, expectedErr, err)
}
| 1 | 15,002 | should probably have config have a `t` or a `logger.NewTestLogger(t)`, and return the latter | keybase-kbfs | go |
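A minimal sketch of what the reviewer is asking for here: keep a test logger on testBlockOpsConfig and return it from MakeLogger rather than handing back logger.NewNull(). It reuses only identifiers already present in the file above (logger.NewTestLogger, NewBlockServerMemory, and so on); the extra `log` field name is my own, and the snippet only compiles inside the kbfs package.

```go
// Sketch against the file above; the `log` field is a hypothetical addition.
type testBlockOpsConfig struct {
	bserver    BlockServer
	testCodec  kbfscodec.Codec
	cryptoPure cryptoPure
	cache      BlockCache
	log        logger.Logger
}

// Return the stored test logger instead of a null logger.
func (config testBlockOpsConfig) MakeLogger(module string) logger.Logger {
	return config.log
}

func makeTestBlockOpsConfig(t *testing.T) testBlockOpsConfig {
	log := logger.NewTestLogger(t)
	bserver := NewBlockServerMemory(log)
	codec := kbfscodec.NewMsgpack()
	crypto := MakeCryptoCommon(codec)
	cache := NewBlockCacheStandard(10, getDefaultCleanBlockCacheCapacity())
	return testBlockOpsConfig{bserver, codec, crypto, cache, log}
}
```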
@@ -51,8 +51,10 @@ func (p *heuristicPlanner) Plan(inputPlan *PlanSpec) (*PlanSpec, error) {
visited := make(map[PlanNode]struct{})
- nodeStack := make([]PlanNode, len(inputPlan.Results()))
- copy(nodeStack, inputPlan.Results())
+ nodeStack := make([]PlanNode, 0, len(inputPlan.Roots))
+ for root := range inputPlan.Roots {
+ nodeStack = append(nodeStack, root)
+ }
anyChanged = false
for len(nodeStack) > 0 { | 1 | package planner
// heuristicPlanner applies a set of rules to the nodes in a PlanSpec
// until a fixed point is reached and no more rules can be applied.
type heuristicPlanner struct {
rules map[ProcedureKind][]Rule
}
func newHeuristicPlanner() *heuristicPlanner {
return &heuristicPlanner{
rules: make(map[ProcedureKind][]Rule),
}
}
func (p *heuristicPlanner) addRules(rules []Rule) {
for _, rule := range rules {
ruleSlice := p.rules[rule.Pattern().Root()]
p.rules[rule.Pattern().Root()] = append(ruleSlice, rule)
}
}
// matchRules applies any applicable rules to the given plan node,
// and returns the rewritten plan node and whether or not any rewriting was done.
func (p *heuristicPlanner) matchRules(node PlanNode) (PlanNode, bool) {
anyChanged := false
for _, rule := range p.rules[AnyKind] {
newNode, changed := rule.Rewrite(node)
anyChanged = anyChanged || changed
node = newNode
}
for _, rule := range p.rules[node.Kind()] {
newNode, changed := rule.Rewrite(node)
anyChanged = anyChanged || changed
node = newNode
}
return node, anyChanged
}
// Plan is a fixed-point query planning algorithm.
// It traverses the DAG depth-first, attempting to apply rewrite rules at each node.
// Traversal is repeated until a pass over the DAG results in no changes with the given rule set.
//
// Plan may change its argument and/or return a new instance of PlanSpec, so the correct way to call Plan is:
// plan, err = planner.Plan(plan)
func (p *heuristicPlanner) Plan(inputPlan *PlanSpec) (*PlanSpec, error) {
for anyChanged := true; anyChanged == true; {
visited := make(map[PlanNode]struct{})
nodeStack := make([]PlanNode, len(inputPlan.Results()))
copy(nodeStack, inputPlan.Results())
anyChanged = false
for len(nodeStack) > 0 {
node := nodeStack[len(nodeStack)-1]
nodeStack = nodeStack[0 : len(nodeStack)-1]
_, alreadyVisited := visited[node]
if !alreadyVisited {
newNode, changed := p.matchRules(node)
anyChanged = anyChanged || changed
if node != newNode {
updateSuccessors(inputPlan, node, newNode)
}
// append to stack in reverse order so lower-indexed children
// are visited first.
for i := len(newNode.Predecessors()); i > 0; i-- {
nodeStack = append(nodeStack, newNode.Predecessors()[i-1])
}
visited[newNode] = struct{}{}
}
}
}
return inputPlan, nil
}
// updateSuccessors looks at all the successors of oldNode
// and rewires them to point them at newNode.
// Predecessors of oldNode and newNode are not touched.
//
// A B A B <-- successors
// \ / \ /
// node becomes newNode
// / \ / \
// D E D' E' <-- predecessors
func updateSuccessors(plan *PlanSpec, oldNode, newNode PlanNode) {
newNode.ClearSuccessors()
if len(oldNode.Successors()) == 0 {
// This is a new root node.
plan.Replace(oldNode, newNode)
return
}
for _, succ := range oldNode.Successors() {
i := 0
for ; i < len(succ.Predecessors()); i++ {
succ.Predecessors()[i] = newNode
}
if i == len(succ.Predecessors()) {
panic("Inconsistent plan graph: successor does not have edge back to predecessor")
}
}
newNode.AddSuccessors(oldNode.Successors()...)
}
| 1 | 8,617 | Does this not do the same thing as `copy`? I thought that `copy` just did an elementwise assignment, but maybe I was wrong. | influxdata-flux | go |
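On the `copy` question: `copy` only does slice-to-slice element assignment, and the new `inputPlan.Roots` in the hunk is ranged with a single loop variable that is appended as a `PlanNode`, i.e. it is a map keyed by nodes, so its keys can only be collected with a loop. A small standalone Go illustration of the difference, using throwaway types rather than the planner's own:

```go
package main

import "fmt"

type node string

func main() {
	// Slice source: copy does an element-wise assignment, equivalent to
	// the old make(..., len)+copy pattern in the planner.
	results := []node{"a", "b", "c"}
	stack := make([]node, len(results))
	copy(stack, results)
	fmt.Println(stack) // [a b c]

	// Map source (a set of roots): copy cannot read a map, so the keys
	// have to be collected with a range loop and append.
	roots := map[node]struct{}{"x": {}, "y": {}}
	stack = make([]node, 0, len(roots)) // length 0, capacity len(roots)
	for root := range roots {
		stack = append(stack, root)
	}
	fmt.Println(len(stack)) // 2 (key order is not deterministic)
}
```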
@@ -124,9 +124,9 @@ public abstract class FlatteningConfig {
// flattening.
Map<String, FlatteningConfig> flatteningConfigs = new LinkedHashMap<>();
- flatteningConfigs.putAll(flatteningConfigsFromGapicConfig);
- // Let flattenings from proto annotations override flattenings from GAPIC config.
+ // Let flattenings from GAPIC config override flattenings from proto annotations.
flatteningConfigs.putAll(flatteningConfigsFromProtoFile);
+ flatteningConfigs.putAll(flatteningConfigsFromGapicConfig);
return ImmutableList.copyOf(flatteningConfigs.values());
} | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.MethodSignature;
import com.google.api.codegen.FlatteningGroupProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.ResourceNameTreatment;
import com.google.api.codegen.configgen.transformer.DiscoveryMethodTransformer;
import com.google.api.codegen.util.ProtoParser;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Oneof;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
/** FlatteningConfig represents a specific flattening configuration for a method. */
@AutoValue
public abstract class FlatteningConfig {
// Maps the name of the parameter in this flattening to its FieldConfig.
public abstract ImmutableMap<String, FieldConfig> getFlattenedFieldConfigs();
@Nullable
public abstract String getFlatteningName();
/**
* Returns a map of a string representing a list of the fields in a flattening, to the flattening
* config created from a method in the gapic config.
*/
private static Map<String, FlatteningConfig> createFlatteningsFromGapicConfig(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
MethodModel methodModel) {
Map<String, FlatteningConfig> flatteningConfigs = new LinkedHashMap<>();
for (FlatteningGroupProto flatteningGroup : methodConfigProto.getFlattening().getGroupsList()) {
FlatteningConfig groupConfig =
FlatteningConfig.createFlatteningFromConfigProto(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodConfigProto,
flatteningGroup,
methodModel);
if (groupConfig != null) {
flatteningConfigs.put(flatteningConfigToString(groupConfig), groupConfig);
}
}
if (diagCollector.hasErrors()) {
return null;
}
return flatteningConfigs;
}
static ImmutableList<FlatteningConfig> createFlatteningConfigs(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
DiscoveryMethodModel methodModel) {
Map<String, FlatteningConfig> flatteningConfigMap =
createFlatteningsFromGapicConfig(
diagCollector, messageConfigs, resourceNameConfigs, methodConfigProto, methodModel);
if (flatteningConfigMap == null) {
return null;
}
return ImmutableList.copyOf(flatteningConfigMap.values());
}
@VisibleForTesting
@Nullable
static ImmutableList<FlatteningConfig> createFlatteningConfigs(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
ProtoMethodModel methodModel,
ProtoParser protoParser) {
Map<String, FlatteningConfig> flatteningConfigsFromGapicConfig =
createFlatteningsFromGapicConfig(
diagCollector, messageConfigs, resourceNameConfigs, methodConfigProto, methodModel);
if (flatteningConfigsFromGapicConfig == null) {
return null;
}
// Get flattenings from protofile annotations
Map<String, FlatteningConfig> flatteningConfigsFromProtoFile =
createFlatteningConfigsFromProtoFile(
diagCollector, messageConfigs, resourceNameConfigs, methodModel, protoParser);
if (flatteningConfigsFromProtoFile == null) {
return null;
}
// Enforce unique flattening configs, in case proto annotations overlaps with configProto
// flattening.
Map<String, FlatteningConfig> flatteningConfigs = new LinkedHashMap<>();
flatteningConfigs.putAll(flatteningConfigsFromGapicConfig);
// Let flattenings from proto annotations override flattenings from GAPIC config.
flatteningConfigs.putAll(flatteningConfigsFromProtoFile);
return ImmutableList.copyOf(flatteningConfigs.values());
}
/**
* Returns a map of a string representing a list of the fields in a flattening, to the flattening
* config created from a method from the proto file.
*/
@Nullable
private static Map<String, FlatteningConfig> createFlatteningConfigsFromProtoFile(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
ProtoMethodModel methodModel,
ProtoParser protoParser) {
Map<String, FlatteningConfig> flatteningConfigs = new LinkedHashMap<>();
// Get flattenings from protofile annotations, let these override flattenings from GAPIC config.
List<MethodSignature> methodSignatures =
protoParser.getMethodSignatures(methodModel.getProtoMethod());
for (MethodSignature signature : methodSignatures) {
if (signature.getFieldsCount() == 0) {
break;
}
FlatteningConfig groupConfig =
FlatteningConfig.createFlatteningFromProtoFile(
diagCollector,
messageConfigs,
resourceNameConfigs,
signature,
methodModel,
protoParser);
if (groupConfig != null) {
flatteningConfigs.put(flatteningConfigToString(groupConfig), groupConfig);
}
}
if (diagCollector.hasErrors()) {
return null;
}
return flatteningConfigs;
}
/**
* Creates an instance of FlatteningConfig based on a FlatteningGroupProto, linking it up with the
* provided method.
*/
@Nullable
private static FlatteningConfig createFlatteningFromConfigProto(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
FlatteningGroupProto flatteningGroup,
MethodModel method) {
boolean missing = false;
ImmutableMap.Builder<String, FieldConfig> flattenedFieldConfigBuilder = ImmutableMap.builder();
Set<String> oneofNames = new HashSet<>();
List<String> flattenedParams = Lists.newArrayList(flatteningGroup.getParametersList());
if (method.hasExtraFieldMask()) {
flattenedParams.add(DiscoveryMethodTransformer.FIELDMASK_STRING);
}
for (String parameter : flattenedParams) {
FieldModel parameterField = method.getInputField(parameter);
if (parameterField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Field missing for flattening: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
parameter));
return null;
}
Oneof oneof = parameterField.getOneof();
if (oneof != null) {
String oneofName = oneof.getName();
if (oneofNames.contains(oneofName)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Value from oneof already specifed for flattening:%n"
+ "method = %s, message type = %s, oneof = %s",
method.getFullName(),
method.getInputFullName(),
oneofName));
return null;
}
oneofNames.add(oneofName);
}
ResourceNameTreatment defaultResourceNameTreatment =
methodConfigProto.getResourceNameTreatment();
if (!parameterField.mayBeInResourceName()) {
defaultResourceNameTreatment = ResourceNameTreatment.NONE;
}
if (defaultResourceNameTreatment == null
|| defaultResourceNameTreatment.equals(ResourceNameTreatment.UNSET_TREATMENT)) {
defaultResourceNameTreatment = ResourceNameTreatment.VALIDATE;
}
FieldConfig fieldConfig =
FieldConfig.createFieldConfig(
diagCollector,
messageConfigs,
methodConfigProto.getFieldNamePatternsMap(),
resourceNameConfigs,
parameterField,
flatteningGroup.getParameterResourceNameTreatmentMap().get(parameter),
defaultResourceNameTreatment);
if (fieldConfig == null) {
missing = true;
} else {
flattenedFieldConfigBuilder.put(parameter, fieldConfig);
}
}
if (missing) {
return null;
}
return new AutoValue_FlatteningConfig(
flattenedFieldConfigBuilder.build(), flatteningGroup.getFlatteningGroupName());
}
/**
* Creates an instance of FlatteningConfig based on a FlatteningGroupProto, linking it up with the
* provided method.
*/
@Nullable
private static FlatteningConfig createFlatteningFromProtoFile(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodSignature methodSignature,
ProtoMethodModel method,
ProtoParser protoParser) {
// TODO(andrealin): combine this method with createFlatteningFromConfigProto.
ImmutableMap.Builder<String, FieldConfig> flattenedFieldConfigBuilder = ImmutableMap.builder();
Set<String> oneofNames = new HashSet<>();
List<String> flattenedParams = Lists.newArrayList(methodSignature.getFieldsList());
for (String parameter : flattenedParams) {
ProtoField parameterField = method.getInputField(parameter);
if (parameterField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Field missing for flattening: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
parameter));
return null;
}
Oneof oneof = parameterField.getOneof();
if (oneof != null) {
String oneofName = oneof.getName();
if (oneofNames.contains(oneofName)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Value from oneof already specifed for flattening:%n"
+ "method = %s, message type = %s, oneof = %s",
method.getFullName(),
method.getInputFullName(),
oneofName));
return null;
}
oneofNames.add(oneofName);
}
ResourceNameTreatment resourceNameTreatment = ResourceNameTreatment.NONE;
String resourceNameType = protoParser.getResourceReference(parameterField.getProtoField());
if (!Strings.isNullOrEmpty(resourceNameType)) {
resourceNameTreatment = ResourceNameTreatment.STATIC_TYPES;
}
FieldConfig fieldConfig =
FieldConfig.createMessageFieldConfig(
messageConfigs, resourceNameConfigs, parameterField, resourceNameTreatment);
flattenedFieldConfigBuilder.put(parameter, fieldConfig);
}
return new AutoValue_FlatteningConfig(flattenedFieldConfigBuilder.build(), null);
}
public Iterable<FieldModel> getFlattenedFields() {
return FieldConfig.toFieldTypeIterable(getFlattenedFieldConfigs().values());
}
public FlatteningConfig withResourceNamesInSamplesOnly() {
ImmutableMap<String, FieldConfig> newFlattenedFieldConfigs =
getFlattenedFieldConfigs()
.entrySet()
.stream()
.collect(
ImmutableMap.toImmutableMap(
Map.Entry::getKey, e -> e.getValue().withResourceNameInSampleOnly()));
return new AutoValue_FlatteningConfig(newFlattenedFieldConfigs, getFlatteningName());
}
public static boolean hasAnyRepeatedResourceNameParameter(FlatteningConfig flatteningGroup) {
// Used in Java to prevent generating a flattened method with List<ResourceName> as a parameter
// because that has the same type erasure as the version of the flattened method with
// List<String> as a parameter.
// TODO(gapic-generator issue #2137) Only use raw String type for repeated params
// not for singular params in the same flattened method.
return flatteningGroup
.getFlattenedFieldConfigs()
.values()
.stream()
.anyMatch(
(FieldConfig fieldConfig) ->
fieldConfig.getField().isRepeated() && fieldConfig.useResourceNameType());
}
/** Returns a string representing the ordered fields in a flattening config. */
private static String flatteningConfigToString(FlatteningConfig flatteningConfig) {
Iterable<FieldModel> paramList = flatteningConfig.getFlattenedFields();
StringBuilder paramsAsString = new StringBuilder();
paramList.forEach(p -> paramsAsString.append(p.getSimpleName()).append(", "));
return paramsAsString.toString();
}
}
| 1 | 27,655 | Here we are merging the configs from proto annotations and gapic config. But in other places in this PR we are using the new enum to pick one or the other, right? Or is that not the correct way to think about this? | googleapis-gapic-generator | java |
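The hunk above only reorders the two `putAll` calls, so whichever map is written last wins on duplicate keys. To keep all added examples in this dump in one language, here is a Go analogue of that last-write-wins merge rather than the Java `LinkedHashMap` itself; the value-override behaviour on key collision is the same.

```go
package main

import "fmt"

func main() {
	fromProto := map[string]string{"get_book": "proto-flattening"}
	fromGapicConfig := map[string]string{"get_book": "gapic-config-flattening"}

	merged := map[string]string{}
	// Insert proto-derived entries first, then let GAPIC-config entries
	// override them on collision -- mirroring the reordered putAll calls.
	for k, v := range fromProto {
		merged[k] = v
	}
	for k, v := range fromGapicConfig {
		merged[k] = v
	}
	fmt.Println(merged["get_book"]) // gapic-config-flattening
}
```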
@@ -16,7 +16,7 @@ var opts = struct {
Usage string
Verbosity cli.Verbosity `short:"v" long:"verbosity" default:"warning" description:"Verbosity of output (higher number = more output)"`
CacheDir string `short:"d" long:"dir" default:"" description:"The directory to store cached artifacts in."`
- Port int `short:"p" long:"port" description:"The port to run the server on"`
+ Port int `short:"p" long:"port" description:"The port to run the server on" default:"8080"`
}{
Usage: `
HTTP cache implements a resource based http server that please can use as a cache. The cache supports storing files | 1 | package main
import (
"fmt"
"github.com/thought-machine/please/src/cli"
"github.com/thought-machine/please/tools/http_cache/cache"
"gopkg.in/op/go-logging.v1"
"net/http"
"os"
"path/filepath"
)
var log = logging.MustGetLogger("httpcache")
var opts = struct {
Usage string
Verbosity cli.Verbosity `short:"v" long:"verbosity" default:"warning" description:"Verbosity of output (higher number = more output)"`
CacheDir string `short:"d" long:"dir" default:"" description:"The directory to store cached artifacts in."`
Port int `short:"p" long:"port" description:"The port to run the server on"`
}{
Usage: `
HTTP cache implements a resource based http server that please can use as a cache. The cache supports storing files
via PUT requests and retrieving them again through GET requests. Really any http server (e.g. nginx) can be used as a
cache for please however this is a lightweight and easy to configure option.
`,
}
func main() {
cli.ParseFlagsOrDie("HTTP Cache", &opts)
if opts.CacheDir == "" {
userCacheDir, err := os.UserCacheDir()
if err != nil {
log.Fatalf("failed to get user cache dir: %v", err)
}
opts.CacheDir = filepath.Join(userCacheDir, "please_http_cache")
}
log.Infof("Started please http cache at 127.0.0.1:%v serving out of %v", opts.Port, opts.CacheDir)
err := http.ListenAndServe(fmt.Sprint(":", opts.Port), cache.New(opts.CacheDir))
if err != nil {
log.Panic(err)
}
}
| 1 | 9,812 | not related to this change? | thought-machine-please | go |
@@ -51,9 +51,9 @@ if (options.arch) {
const buildType = options.buildType;
-const ndkPath = process.env["ANDROID_NDK"];
+const ndkPath = process.env["ANDROID_NDK"] || process.env["ANDROID_NDK_HOME"];
if (!ndkPath) {
- throw Error("ANDROID_NDK environment variable not set");
+ throw Error("ANDROID_NDK / ANDROID_NDK_HOME environment variable not set");
}
const sdkPath = getAndroidSdkPath(); | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2021 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
const commandLineArgs = require("command-line-args");
const fs = require("fs-extra");
const path = require("path");
const exec = require("child_process").execFileSync;
const compareVersions = require("compare-versions");
//simple validation of current directory.
const rnDir = path.resolve(process.cwd(), "react-native");
if (!fs.existsSync(rnDir)) {
throw new Error("This script needs to be run at the root dir of the project");
}
const copyOutputPath = path.resolve(process.cwd(), "react-native", "android", "src", "main", "jniLibs");
const buildTypes = ["Debug", "Release", "RelWithDebInfo", "MinSizeRel"];
let architectures = ["x86", "armeabi-v7a", "arm64-v8a", "x86_64"];
const optionDefinitions = [
{ name: "arch", type: validateArchitectures, multiple: false, description: "Build only for a single architecture" },
{ name: "clean", type: Boolean, defaultValue: false, multiple: false, description: "Rebuild from scratch" },
{
name: "build-type",
type: validateBuildType,
defaultValue: "Release",
multiple: false,
description: "CMAKE_BUILD_TYPE: Debug, Release, RelWithDebInfo, MinSizeRel",
},
];
const options = commandLineArgs(optionDefinitions, { camelCase: true });
if (options.arch) {
architectures = [options.arch];
}
const buildType = options.buildType;
const ndkPath = process.env["ANDROID_NDK"];
if (!ndkPath) {
throw Error("ANDROID_NDK environment variable not set");
}
const sdkPath = getAndroidSdkPath();
const cmakePath = getCmakePath(sdkPath);
const cmakeVersion = getCmakeVersion(sdkPath);
const buildPath = path.resolve(process.cwd(), "build-realm-android");
if (options.clean) {
if (fs.existsSync(buildPath)) {
fs.removeSync(buildPath);
}
}
if (!fs.existsSync(buildPath)) {
fs.mkdirSync(buildPath);
}
//shared root dir to download jsc once for all architectures
const jscDir = path.resolve(buildPath, "jsc-android");
for (const arch of architectures) {
console.log(`\nBuilding Realm JS Android for ${arch} (${buildType})`);
console.log("=======================================");
//create a build dir per architecture
const archBuildDir = path.resolve(buildPath, arch);
if (!fs.existsSync(archBuildDir)) {
fs.mkdirSync(archBuildDir);
}
let args = [
cmakePath,
"-GNinja",
`-DANDROID_NDK=${ndkPath}`,
`-DANDROID_ABI=${arch}`,
`-DCMAKE_MAKE_PROGRAM=${sdkPath}/cmake/${cmakeVersion}/bin/ninja`,
`-DCMAKE_TOOLCHAIN_FILE=${ndkPath}/build/cmake/android.toolchain.cmake`,
"-DANDROID_TOOLCHAIN=clang",
"-DANDROID_NATIVE_API_LEVEL=16",
`-DCMAKE_BUILD_TYPE=${buildType}`,
"-DANDROID_STL=c++_static",
`-DJSC_ROOT_DIR=${jscDir}`,
process.cwd(),
];
exec(cmakePath, args, { cwd: archBuildDir, stdio: "inherit" });
//cwd is the archBuildDir here, hence build the current dir with "--build ."
args = ["--build", "."];
exec(cmakePath, args, { cwd: archBuildDir, stdio: "inherit" });
copyOutput(arch, archBuildDir);
}
generateVersionFile();
function generateVersionFile() {
const targetFile = path.resolve(
process.cwd(),
"react-native",
"android",
"src",
"main",
"java",
"io",
"realm",
"react",
"Version.java",
);
const version = getVersion();
const versionFileContents = `package io.realm.react;
public class Version {
public static final String VERSION = "${version}";
}
`;
fs.writeFileSync(targetFile, versionFileContents);
}
function getVersion() {
const depencenciesListFile = path.resolve(process.cwd(), "dependencies.list");
const contents = fs.readFileSync(depencenciesListFile, "UTF-8");
const lines = contents.split(/\r?\n/);
const versionValue = lines.find((line) => line.startsWith("VERSION="));
if (!versionValue) {
throw new Error("Realm version not found. Invalid dependencies.list file");
}
const version = versionValue.split("=")[1];
if (!version) {
throw new Error("Realm version not found. Invalid version value in dependencies.list file");
}
return version;
}
function copyOutput(arch, buildDir) {
const outFile = path.resolve(buildDir, "src", "android", "libs", arch, "librealm.so");
if (!fs.existsSync(outFile)) {
throw new Error(`Build output file not found: ${outFile}`);
}
const archDir = path.resolve(copyOutputPath, arch);
if (!fs.existsSync(archDir)) {
fs.mkdirSync(archDir, { recursive: true });
}
const targetFile = path.resolve(archDir, "librealm.so");
console.log(`Copying build file \n${outFile} to \n${targetFile}`);
fs.copyFileSync(outFile, targetFile);
}
function getAndroidSdkPath() {
if ("ANDROID_SDK_ROOT" in process.env) {
console.log("Using ANDROID_SDK_ROOT env variable");
return process.env["ANDROID_SDK_ROOT"];
}
if ("ANDROID_SDK" in process.env) {
console.log("Using ANDROID_SDK env variable");
return process.env["ANDROID_SDK"];
}
if ("ANDROID_HOME" in process.env) {
console.log("Using ANDROID_HOME env variable");
return process.env["ANDROID_HOME"];
}
throw new Error("Android SDK not found. ANDROID_SDK or ANDROID_HOME or ANDROID_SDK_ROOT needs to be set");
}
function getCmakePath() {
if ("CMAKE_PATH" in process.env) {
console.log("Using cmake from CMAKE_PATH environment variable");
return process.env["CMAKE_PATH"];
}
return process.platform === "win32" ? "cmake.exe" : "cmake";
}
function getCmakeVersion(sdkPath) {
const cmakePath = `${sdkPath}/cmake`;
let dirs = fs.readdirSync(cmakePath);
if (dirs.length === 0) {
throw new Error(`No CMake installation found in ${cmakePath}`);
}
const version = dirs.sort(compareVersions)[dirs.length - 1];
console.log(`Found CMake ${version} in ${cmakePath}`);
return version;
}
function validateBuildType(buildTypeOption) {
if (!buildTypes.includes(buildTypeOption)) {
throw new Error(`Invalid build type: ${buildTypeOption}. Supported architectures ${buildTypes}`);
}
return buildTypeOption;
}
function validateArchitectures(arch) {
if (!architectures.includes(arch)) {
throw new Error(`"Invalid architecture ${arch}. Supported architectures ${architectures}`);
}
return arch;
}
| 1 | 21,152 | Why is this needed? | realm-realm-js | js |
@@ -1353,7 +1353,7 @@ int64_t Creature::getStepDuration() const
int32_t stepSpeed = getStepSpeed();
if (stepSpeed > -Creature::speedB) {
calculatedStepSpeed = floor((Creature::speedA * log((stepSpeed / 2) + Creature::speedB) + Creature::speedC) + 0.5);
- if (calculatedStepSpeed <= 0) {
+ if (calculatedStepSpeed == 0) {
calculatedStepSpeed = 1;
}
} else { | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2017 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "creature.h"
#include "game.h"
#include "monster.h"
#include "configmanager.h"
#include "scheduler.h"
double Creature::speedA = 857.36;
double Creature::speedB = 261.29;
double Creature::speedC = -4795.01;
extern Game g_game;
extern ConfigManager g_config;
extern CreatureEvents* g_creatureEvents;
Creature::Creature()
{
onIdleStatus();
}
Creature::~Creature()
{
for (Creature* summon : summons) {
summon->setAttackedCreature(nullptr);
summon->removeMaster();
}
for (Condition* condition : conditions) {
condition->endCondition(this);
delete condition;
}
}
bool Creature::canSee(const Position& myPos, const Position& pos, int32_t viewRangeX, int32_t viewRangeY)
{
if (myPos.z <= 7) {
//we are on ground level or above (7 -> 0)
//view is from 7 -> 0
if (pos.z > 7) {
return false;
}
} else if (myPos.z >= 8) {
//we are underground (8 -> 15)
//view is +/- 2 from the floor we stand on
if (Position::getDistanceZ(myPos, pos) > 2) {
return false;
}
}
const int_fast32_t offsetz = myPos.getZ() - pos.getZ();
return (pos.getX() >= myPos.getX() - viewRangeX + offsetz) && (pos.getX() <= myPos.getX() + viewRangeX + offsetz)
&& (pos.getY() >= myPos.getY() - viewRangeY + offsetz) && (pos.getY() <= myPos.getY() + viewRangeY + offsetz);
}
bool Creature::canSee(const Position& pos) const
{
return canSee(getPosition(), pos, Map::maxViewportX, Map::maxViewportY);
}
bool Creature::canSeeCreature(const Creature* creature) const
{
if (!canSeeInvisibility() && creature->isInvisible()) {
return false;
}
return true;
}
void Creature::setSkull(Skulls_t newSkull)
{
skull = newSkull;
g_game.updateCreatureSkull(this);
}
int64_t Creature::getTimeSinceLastMove() const
{
if (lastStep) {
return OTSYS_TIME() - lastStep;
}
return std::numeric_limits<int64_t>::max();
}
int32_t Creature::getWalkDelay(Direction dir) const
{
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration(dir);
return stepDuration - (ct - lastStep);
}
int32_t Creature::getWalkDelay() const
{
//Used for auto-walking
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration() * lastStepCost;
return stepDuration - (ct - lastStep);
}
void Creature::onThink(uint32_t interval)
{
if (!isMapLoaded && useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
if (followCreature && master != followCreature && !canSeeCreature(followCreature)) {
onCreatureDisappear(followCreature, false);
}
if (attackedCreature && master != attackedCreature && !canSeeCreature(attackedCreature)) {
onCreatureDisappear(attackedCreature, false);
}
blockTicks += interval;
if (blockTicks >= 1000) {
blockCount = std::min<uint32_t>(blockCount + 1, 2);
blockTicks = 0;
}
if (followCreature) {
walkUpdateTicks += interval;
if (forceUpdateFollowPath || walkUpdateTicks >= 2000) {
walkUpdateTicks = 0;
forceUpdateFollowPath = false;
isUpdatingPath = true;
}
}
if (isUpdatingPath) {
isUpdatingPath = false;
goToFollowCreature();
}
//scripting event - onThink
const CreatureEventList& thinkEvents = getCreatureEvents(CREATURE_EVENT_THINK);
for (CreatureEvent* thinkEvent : thinkEvents) {
thinkEvent->executeOnThink(this, interval);
}
}
void Creature::onAttacking(uint32_t interval)
{
if (!attackedCreature) {
return;
}
onAttacked();
attackedCreature->onAttacked();
if (g_game.isSightClear(getPosition(), attackedCreature->getPosition(), true)) {
doAttacking(interval);
}
}
void Creature::onIdleStatus()
{
if (getHealth() > 0) {
damageMap.clear();
lastHitCreatureId = 0;
}
}
void Creature::onWalk()
{
if (getWalkDelay() <= 0) {
Direction dir;
uint32_t flags = FLAG_IGNOREFIELDDAMAGE;
if (getNextStep(dir, flags)) {
ReturnValue ret = g_game.internalMoveCreature(this, dir, flags);
if (ret != RETURNVALUE_NOERROR) {
if (Player* player = getPlayer()) {
player->sendCancelMessage(ret);
player->sendCancelWalk();
}
forceUpdateFollowPath = true;
}
} else {
if (listWalkDir.empty()) {
onWalkComplete();
}
stopEventWalk();
}
}
if (cancelNextWalk) {
listWalkDir.clear();
onWalkAborted();
cancelNextWalk = false;
}
if (eventWalk != 0) {
eventWalk = 0;
addEventWalk();
}
}
void Creature::onWalk(Direction& dir)
{
if (hasCondition(CONDITION_DRUNK)) {
uint32_t r = uniform_random(0, 20);
if (r <= DIRECTION_DIAGONAL_MASK) {
if (r < DIRECTION_DIAGONAL_MASK) {
dir = static_cast<Direction>(r);
}
g_game.internalCreatureSay(this, TALKTYPE_MONSTER_SAY, "Hicks!", false);
}
}
}
bool Creature::getNextStep(Direction& dir, uint32_t&)
{
if (listWalkDir.empty()) {
return false;
}
dir = listWalkDir.front();
listWalkDir.pop_front();
onWalk(dir);
return true;
}
void Creature::startAutoWalk(const std::forward_list<Direction>& listDir)
{
listWalkDir = listDir;
size_t size = 0;
for (auto it = listDir.begin(); it != listDir.end() && size <= 1; ++it) {
size++;
}
addEventWalk(size == 1);
}
void Creature::addEventWalk(bool firstStep)
{
cancelNextWalk = false;
if (getStepSpeed() <= 0) {
return;
}
if (eventWalk != 0) {
return;
}
int64_t ticks = getEventStepTicks(firstStep);
if (ticks <= 0) {
return;
}
// Take first step right away, but still queue the next
if (ticks == 1) {
g_game.checkCreatureWalk(getID());
}
eventWalk = g_scheduler.addEvent(createSchedulerTask(ticks, std::bind(&Game::checkCreatureWalk, &g_game, getID())));
}
void Creature::stopEventWalk()
{
if (eventWalk != 0) {
g_scheduler.stopEvent(eventWalk);
eventWalk = 0;
}
}
void Creature::updateMapCache()
{
Tile* tile;
const Position& myPos = getPosition();
Position pos(0, 0, myPos.z);
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
pos.x = myPos.getX() + x;
pos.y = myPos.getY() + y;
tile = g_game.map.getTile(pos);
updateTileCache(tile, pos);
}
}
}
void Creature::updateTileCache(const Tile* tile, int32_t dx, int32_t dy)
{
if (std::abs(dx) <= maxWalkCacheWidth && std::abs(dy) <= maxWalkCacheHeight) {
localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx] = tile && tile->queryAdd(0, *this, 1, FLAG_PATHFINDING | FLAG_IGNOREFIELDDAMAGE) == RETURNVALUE_NOERROR;
}
}
void Creature::updateTileCache(const Tile* tile, const Position& pos)
{
const Position& myPos = getPosition();
if (pos.z == myPos.z) {
int32_t dx = Position::getOffsetX(pos, myPos);
int32_t dy = Position::getOffsetY(pos, myPos);
updateTileCache(tile, dx, dy);
}
}
int32_t Creature::getWalkCache(const Position& pos) const
{
if (!useCacheMap()) {
return 2;
}
const Position& myPos = getPosition();
if (myPos.z != pos.z) {
return 0;
}
if (pos == myPos) {
return 1;
}
int32_t dx = Position::getOffsetX(pos, myPos);
if (std::abs(dx) <= maxWalkCacheWidth) {
int32_t dy = Position::getOffsetY(pos, myPos);
if (std::abs(dy) <= maxWalkCacheHeight) {
if (localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx]) {
return 1;
} else {
return 0;
}
}
}
//out of range
return 2;
}
void Creature::onAddTileItem(const Tile* tile, const Position& pos)
{
if (isMapLoaded && pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
void Creature::onUpdateTileItem(const Tile* tile, const Position& pos, const Item*,
const ItemType& oldType, const Item*, const ItemType& newType)
{
if (!isMapLoaded) {
return;
}
if (oldType.blockSolid || oldType.blockPathFind || newType.blockPathFind || newType.blockSolid) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType, const Item*)
{
if (!isMapLoaded) {
return;
}
if (iType.blockSolid || iType.blockPathFind || iType.isGroundTile()) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onCreatureAppear(Creature* creature, bool isLogin)
{
if (creature == this) {
if (useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
if (isLogin) {
setLastPosition(getPosition());
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onRemoveCreature(Creature* creature, bool)
{
onCreatureDisappear(creature, true);
if (creature == this) {
if (master && !master->isRemoved()) {
setMaster(nullptr);
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onCreatureDisappear(const Creature* creature, bool isLogout)
{
if (attackedCreature == creature) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(isLogout);
}
if (followCreature == creature) {
setFollowCreature(nullptr);
onFollowCreatureDisappear(isLogout);
}
}
void Creature::onChangeZone(ZoneType_t zone)
{
if (attackedCreature && zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onAttackedCreatureChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
if (creature == this) {
lastStep = OTSYS_TIME();
lastStepCost = 1;
if (!teleport) {
if (oldPos.z != newPos.z) {
//floor change extra cost
lastStepCost = 2;
} else if (Position::getDistanceX(newPos, oldPos) >= 1 && Position::getDistanceY(newPos, oldPos) >= 1) {
//diagonal extra cost
lastStepCost = 3;
}
} else {
stopEventWalk();
}
if (!summons.empty()) {
//check if any of our summons is out of range (+/- 2 floors or 30 tiles away)
std::forward_list<Creature*> despawnList;
for (Creature* summon : summons) {
const Position& pos = summon->getPosition();
if (Position::getDistanceZ(newPos, pos) > 2 || (std::max<int32_t>(Position::getDistanceX(newPos, pos), Position::getDistanceY(newPos, pos)) > 30)) {
despawnList.push_front(summon);
}
}
for (Creature* despawnCreature : despawnList) {
g_game.removeCreature(despawnCreature, true);
}
}
if (newTile->getZone() != oldTile->getZone()) {
onChangeZone(getZone());
}
//update map cache
if (isMapLoaded) {
if (teleport || oldPos.z != newPos.z) {
updateMapCache();
} else {
const Position& myPos = getPosition();
if (oldPos.y > newPos.y) { //north
//shift y south
for (int32_t y = mapWalkHeight - 1; --y >= 0;) {
memcpy(localMapCache[y + 1], localMapCache[y], sizeof(localMapCache[y]));
}
//update 0
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
Tile* cacheTile = g_game.map.getTile(myPos.getX() + x, myPos.getY() - maxWalkCacheHeight, myPos.z);
updateTileCache(cacheTile, x, -maxWalkCacheHeight);
}
} else if (oldPos.y < newPos.y) { // south
//shift y north
for (int32_t y = 0; y <= mapWalkHeight - 2; ++y) {
memcpy(localMapCache[y], localMapCache[y + 1], sizeof(localMapCache[y]));
}
//update mapWalkHeight - 1
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
Tile* cacheTile = g_game.map.getTile(myPos.getX() + x, myPos.getY() + maxWalkCacheHeight, myPos.z);
updateTileCache(cacheTile, x, maxWalkCacheHeight);
}
}
if (oldPos.x < newPos.x) { // east
//shift y west
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = 0; x <= mapWalkWidth - 2; ++x) {
localMapCache[y][x] = localMapCache[y][x + 1];
}
}
//update mapWalkWidth - 1
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
Tile* cacheTile = g_game.map.getTile(myPos.x + maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(cacheTile, maxWalkCacheWidth, y);
}
} else if (oldPos.x > newPos.x) { // west
//shift y east
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = mapWalkWidth - 1; --x >= 0;) {
localMapCache[y][x + 1] = localMapCache[y][x];
}
}
//update 0
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
Tile* cacheTile = g_game.map.getTile(myPos.x - maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(cacheTile, -maxWalkCacheWidth, y);
}
}
updateTileCache(oldTile, oldPos);
}
}
} else {
if (isMapLoaded) {
const Position& myPos = getPosition();
if (newPos.z == myPos.z) {
updateTileCache(newTile, newPos);
}
if (oldPos.z == myPos.z) {
updateTileCache(oldTile, oldPos);
}
}
}
if (creature == followCreature || (creature == this && followCreature)) {
if (hasFollowPath) {
isUpdatingPath = true;
}
if (newPos.z != oldPos.z || !canSee(followCreature->getPosition())) {
onCreatureDisappear(followCreature, false);
}
}
if (creature == attackedCreature || (creature == this && attackedCreature)) {
if (newPos.z != oldPos.z || !canSee(attackedCreature->getPosition())) {
onCreatureDisappear(attackedCreature, false);
} else {
if (hasExtraSwing()) {
//our target is moving lets see if we can get in hit
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
if (newTile->getZone() != oldTile->getZone()) {
onAttackedCreatureChangeZone(attackedCreature->getZone());
}
}
}
}
void Creature::onDeath()
{
bool lastHitUnjustified = false;
bool mostDamageUnjustified = false;
Creature* lastHitCreature = g_game.getCreatureByID(lastHitCreatureId);
Creature* lastHitCreatureMaster;
if (lastHitCreature) {
lastHitUnjustified = lastHitCreature->onKilledCreature(this);
lastHitCreatureMaster = lastHitCreature->getMaster();
} else {
lastHitCreatureMaster = nullptr;
}
Creature* mostDamageCreature = nullptr;
const int64_t timeNow = OTSYS_TIME();
const uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED);
int32_t mostDamage = 0;
std::map<Creature*, uint64_t> experienceMap;
for (const auto& it : damageMap) {
if (Creature* attacker = g_game.getCreatureByID(it.first)) {
CountBlock_t cb = it.second;
if ((cb.total > mostDamage && (timeNow - cb.ticks <= inFightTicks))) {
mostDamage = cb.total;
mostDamageCreature = attacker;
}
if (attacker != this) {
uint64_t gainExp = getGainedExperience(attacker);
if (Player* attackerPlayer = attacker->getPlayer()) {
attackerPlayer->removeAttacked(getPlayer());
Party* party = attackerPlayer->getParty();
if (party && party->getLeader() && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) {
attacker = party->getLeader();
}
}
auto tmpIt = experienceMap.find(attacker);
if (tmpIt == experienceMap.end()) {
experienceMap[attacker] = gainExp;
} else {
tmpIt->second += gainExp;
}
}
}
}
for (const auto& it : experienceMap) {
it.first->onGainExperience(it.second, this);
}
if (mostDamageCreature) {
if (mostDamageCreature != lastHitCreature && mostDamageCreature != lastHitCreatureMaster) {
Creature* mostDamageCreatureMaster = mostDamageCreature->getMaster();
if (lastHitCreature != mostDamageCreatureMaster && (lastHitCreatureMaster == nullptr || mostDamageCreatureMaster != lastHitCreatureMaster)) {
mostDamageUnjustified = mostDamageCreature->onKilledCreature(this, false);
}
}
}
bool droppedCorpse = dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
death(lastHitCreature);
if (master) {
setMaster(nullptr);
}
if (droppedCorpse) {
g_game.removeCreature(this, false);
}
}
bool Creature::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified)
{
if (!lootDrop && getMonster()) {
if (master) {
//scripting event - onDeath
const CreatureEventList& deathEvents = getCreatureEvents(CREATURE_EVENT_DEATH);
for (CreatureEvent* deathEvent : deathEvents) {
deathEvent->executeOnDeath(this, nullptr, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
}
g_game.addMagicEffect(getPosition(), CONST_ME_POFF);
} else {
Item* splash;
switch (getRace()) {
case RACE_VENOM:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_GREEN);
break;
case RACE_BLOOD:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_BLOOD);
break;
default:
splash = nullptr;
break;
}
Tile* tile = getTile();
if (splash) {
g_game.internalAddItem(tile, splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(splash);
}
Item* corpse = getCorpse(lastHitCreature, mostDamageCreature);
if (corpse) {
g_game.internalAddItem(tile, corpse, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(corpse);
}
//scripting event - onDeath
for (CreatureEvent* deathEvent : getCreatureEvents(CREATURE_EVENT_DEATH)) {
deathEvent->executeOnDeath(this, corpse, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
if (corpse) {
dropLoot(corpse->getContainer(), lastHitCreature);
}
}
return true;
}
bool Creature::hasBeenAttacked(uint32_t attackerId)
{
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
return false;
}
return (OTSYS_TIME() - it->second.ticks) <= g_config.getNumber(ConfigManager::PZ_LOCKED);
}
Item* Creature::getCorpse(Creature*, Creature*)
{
return Item::CreateItem(getLookCorpse());
}
void Creature::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
int32_t oldHealth = health;
if (healthChange > 0) {
health += std::min<int32_t>(healthChange, getMaxHealth() - health);
} else {
health = std::max<int32_t>(0, health + healthChange);
}
if (sendHealthChange && oldHealth != health) {
g_game.addCreatureHealth(this);
}
}
void Creature::gainHealth(Creature* healer, int32_t healthGain)
{
changeHealth(healthGain);
if (healer) {
healer->onTargetCreatureGainHealth(this, healthGain);
}
}
void Creature::drainHealth(Creature* attacker, int32_t damage)
{
changeHealth(-damage, false);
if (attacker) {
attacker->onAttackedCreatureDrainHealth(this, damage);
}
}
BlockType_t Creature::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false */, bool checkArmor /* = false */, bool /* field = false */)
{
BlockType_t blockType = BLOCK_NONE;
if (isImmune(combatType)) {
damage = 0;
blockType = BLOCK_IMMUNITY;
} else if (checkDefense || checkArmor) {
bool hasDefense = false;
if (blockCount > 0) {
--blockCount;
hasDefense = true;
}
if (checkDefense && hasDefense) {
int32_t defense = getDefense();
damage -= uniform_random(defense / 2, defense);
if (damage <= 0) {
damage = 0;
blockType = BLOCK_DEFENSE;
checkArmor = false;
}
}
if (checkArmor) {
int32_t armor = getArmor();
if (armor > 3) {
damage -= uniform_random(armor / 2, armor - (armor % 2 + 1));
} else if (armor > 0) {
--damage;
}
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
}
if (hasDefense && blockType != BLOCK_NONE) {
onBlockHit();
}
}
if (attacker) {
attacker->onAttackedCreature(this);
attacker->onAttackedCreatureBlockHit(blockType);
}
onAttacked();
return blockType;
}
bool Creature::setAttackedCreature(Creature* creature)
{
if (creature) {
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
attackedCreature = nullptr;
return false;
}
attackedCreature = creature;
onAttackedCreature(attackedCreature);
attackedCreature->onAttacked();
} else {
attackedCreature = nullptr;
}
for (Creature* summon : summons) {
summon->setAttackedCreature(creature);
}
return true;
}
void Creature::getPathSearchParams(const Creature*, FindPathParams& fpp) const
{
fpp.fullPathSearch = !hasFollowPath;
fpp.clearSight = true;
fpp.maxSearchDist = 12;
fpp.minTargetDist = 1;
fpp.maxTargetDist = 1;
}
void Creature::goToFollowCreature()
{
if (followCreature) {
FindPathParams fpp;
getPathSearchParams(followCreature, fpp);
Monster* monster = getMonster();
if (monster && !monster->getMaster() && (monster->isFleeing() || fpp.maxTargetDist > 1)) {
Direction dir = DIRECTION_NONE;
if (monster->isFleeing()) {
monster->getDistanceStep(followCreature->getPosition(), dir, true);
} else { //maxTargetDist > 1
if (!monster->getDistanceStep(followCreature->getPosition(), dir)) {
// if we can't get anything then let the A* calculate
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
return;
}
}
if (dir != DIRECTION_NONE) {
listWalkDir.clear();
listWalkDir.push_front(dir);
hasFollowPath = true;
startAutoWalk(listWalkDir);
}
} else {
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
}
}
onFollowCreatureComplete(followCreature);
}
bool Creature::setFollowCreature(Creature* creature)
{
if (creature) {
if (followCreature == creature) {
return true;
}
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
followCreature = nullptr;
return false;
}
if (!listWalkDir.empty()) {
listWalkDir.clear();
onWalkAborted();
}
hasFollowPath = false;
forceUpdateFollowPath = false;
followCreature = creature;
isUpdatingPath = true;
} else {
isUpdatingPath = false;
followCreature = nullptr;
}
onFollowCreature(creature);
return true;
}
double Creature::getDamageRatio(Creature* attacker) const
{
uint32_t totalDamage = 0;
uint32_t attackerDamage = 0;
for (const auto& it : damageMap) {
const CountBlock_t& cb = it.second;
totalDamage += cb.total;
if (it.first == attacker->getID()) {
attackerDamage += cb.total;
}
}
if (totalDamage == 0) {
return 0;
}
return (static_cast<double>(attackerDamage) / totalDamage);
}
uint64_t Creature::getGainedExperience(Creature* attacker) const
{
return std::floor(getDamageRatio(attacker) * getLostExperience());
}
void Creature::addDamagePoints(Creature* attacker, int32_t damagePoints)
{
if (damagePoints <= 0) {
return;
}
uint32_t attackerId = attacker->id;
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
CountBlock_t cb;
cb.ticks = OTSYS_TIME();
cb.total = damagePoints;
damageMap[attackerId] = cb;
} else {
it->second.total += damagePoints;
it->second.ticks = OTSYS_TIME();
}
lastHitCreatureId = attackerId;
}
void Creature::onAddCondition(ConditionType_t type)
{
if (type == CONDITION_PARALYZE && hasCondition(CONDITION_HASTE)) {
removeCondition(CONDITION_HASTE);
} else if (type == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
removeCondition(CONDITION_PARALYZE);
}
}
void Creature::onAddCombatCondition(ConditionType_t)
{
//
}
void Creature::onEndCondition(ConditionType_t)
{
//
}
void Creature::onTickCondition(ConditionType_t type, bool& bRemove)
{
const MagicField* field = getTile()->getFieldItem();
if (!field) {
return;
}
switch (type) {
case CONDITION_FIRE:
bRemove = (field->getCombatType() != COMBAT_FIREDAMAGE);
break;
case CONDITION_ENERGY:
bRemove = (field->getCombatType() != COMBAT_ENERGYDAMAGE);
break;
case CONDITION_POISON:
bRemove = (field->getCombatType() != COMBAT_EARTHDAMAGE);
break;
case CONDITION_FREEZING:
bRemove = (field->getCombatType() != COMBAT_ICEDAMAGE);
break;
case CONDITION_DAZZLED:
bRemove = (field->getCombatType() != COMBAT_HOLYDAMAGE);
break;
case CONDITION_CURSED:
bRemove = (field->getCombatType() != COMBAT_DEATHDAMAGE);
break;
case CONDITION_DROWN:
bRemove = (field->getCombatType() != COMBAT_DROWNDAMAGE);
break;
case CONDITION_BLEEDING:
bRemove = (field->getCombatType() != COMBAT_PHYSICALDAMAGE);
break;
default:
break;
}
}
void Creature::onCombatRemoveCondition(Condition* condition)
{
removeCondition(condition);
}
void Creature::onAttacked()
{
//
}
void Creature::onAttackedCreatureDrainHealth(Creature* target, int32_t points)
{
target->addDamagePoints(this, points);
}
bool Creature::onKilledCreature(Creature* target, bool)
{
if (master) {
master->onKilledCreature(target);
}
//scripting event - onKill
const CreatureEventList& killEvents = getCreatureEvents(CREATURE_EVENT_KILL);
for (CreatureEvent* killEvent : killEvents) {
killEvent->executeOnKill(this, target);
}
return false;
}
void Creature::onGainExperience(uint64_t gainExp, Creature* target)
{
if (gainExp == 0 || !master) {
return;
}
gainExp /= 2;
master->onGainExperience(gainExp, target);
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, false, true);
if (spectators.empty()) {
return;
}
TextMessage message(MESSAGE_EXPERIENCE_OTHERS, ucfirst(getNameDescription()) + " gained " + std::to_string(gainExp) + (gainExp != 1 ? " experience points." : " experience point."));
message.position = position;
message.primary.color = TEXTCOLOR_WHITE_EXP;
message.primary.value = gainExp;
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendTextMessage(message);
}
}
bool Creature::setMaster(Creature* newMaster) {
if (!newMaster && !master) {
return false;
}
if (newMaster) {
incrementReferenceCounter();
newMaster->summons.push_back(this);
}
Creature* oldMaster = master;
master = newMaster;
if (oldMaster) {
auto summon = std::find(oldMaster->summons.begin(), oldMaster->summons.end(), this);
if (summon != oldMaster->summons.end()) {
oldMaster->summons.erase(summon);
decrementReferenceCounter();
}
}
return true;
}
bool Creature::addCondition(Condition* condition, bool force/* = false*/)
{
if (condition == nullptr) {
return false;
}
if (!force && condition->getType() == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceAddCondition, &g_game, getID(), condition)));
return false;
}
}
Condition* prevCond = getCondition(condition->getType(), condition->getId(), condition->getSubId());
if (prevCond) {
prevCond->addCondition(this, condition);
delete condition;
return true;
}
if (condition->startCondition(this)) {
conditions.push_back(condition);
onAddCondition(condition->getType());
return true;
}
delete condition;
return false;
}
bool Creature::addCombatCondition(Condition* condition)
{
//Caution: condition variable could be deleted after the call to addCondition
ConditionType_t type = condition->getType();
if (!addCondition(condition)) {
return false;
}
onAddCombatCondition(type);
return true;
}
void Creature::removeCondition(ConditionType_t type, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCondition(ConditionType_t type, ConditionId_t conditionId, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type || condition->getId() != conditionId) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCombatCondition(ConditionType_t type)
{
std::vector<Condition*> removeConditions;
for (Condition* condition : conditions) {
if (condition->getType() == type) {
removeConditions.push_back(condition);
}
}
for (Condition* condition : removeConditions) {
onCombatRemoveCondition(condition);
}
}
void Creature::removeCondition(Condition* condition, bool force/* = false*/)
{
auto it = std::find(conditions.begin(), conditions.end(), condition);
if (it == conditions.end()) {
return;
}
if (!force && condition->getType() == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), condition->getType())));
return;
}
}
conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
}
Condition* Creature::getCondition(ConditionType_t type) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type) {
return condition;
}
}
return nullptr;
}
Condition* Creature::getCondition(ConditionType_t type, ConditionId_t conditionId, uint32_t subId/* = 0*/) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type && condition->getId() == conditionId && condition->getSubId() == subId) {
return condition;
}
}
return nullptr;
}
void Creature::executeConditions(uint32_t interval)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (!condition->executeCondition(this, interval)) {
ConditionType_t type = condition->getType();
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
} else {
++it;
}
}
}
bool Creature::hasCondition(ConditionType_t type, uint32_t subId/* = 0*/) const
{
if (isSuppress(type)) {
return false;
}
int64_t timeNow = OTSYS_TIME();
for (Condition* condition : conditions) {
if (condition->getType() != type || condition->getSubId() != subId) {
continue;
}
if (condition->getEndTime() >= timeNow) {
return true;
}
}
return false;
}
bool Creature::isImmune(CombatType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getDamageImmunities());
}
bool Creature::isImmune(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionImmunities());
}
bool Creature::isSuppress(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionSuppressions());
}
int64_t Creature::getStepDuration(Direction dir) const
{
int64_t stepDuration = getStepDuration();
if ((dir & DIRECTION_DIAGONAL_MASK) != 0) {
stepDuration *= 3;
}
return stepDuration;
}
int64_t Creature::getStepDuration() const
{
if (isRemoved()) {
return 0;
}
uint32_t calculatedStepSpeed;
uint32_t groundSpeed;
int32_t stepSpeed = getStepSpeed();
if (stepSpeed > -Creature::speedB) {
calculatedStepSpeed = floor((Creature::speedA * log((stepSpeed / 2) + Creature::speedB) + Creature::speedC) + 0.5);
if (calculatedStepSpeed <= 0) {
calculatedStepSpeed = 1;
}
} else {
calculatedStepSpeed = 1;
}
Item* ground = tile->getGround();
if (ground) {
groundSpeed = Item::items[ground->getID()].speed;
if (groundSpeed == 0) {
groundSpeed = 150;
}
} else {
groundSpeed = 150;
}
double duration = std::floor(1000 * groundSpeed / calculatedStepSpeed);
int64_t stepDuration = std::ceil(duration / 50) * 50;
const Monster* monster = getMonster();
if (monster && monster->isTargetNearby() && !monster->isFleeing() && !monster->getMaster()) {
stepDuration *= 2;
}
return stepDuration;
}
int64_t Creature::getEventStepTicks(bool onlyDelay) const
{
int64_t ret = getWalkDelay();
if (ret <= 0) {
int64_t stepDuration = getStepDuration();
if (onlyDelay && stepDuration > 0) {
ret = 1;
} else {
ret = stepDuration * lastStepCost;
}
}
return ret;
}
LightInfo Creature::getCreatureLight() const
{
return internalLight;
}
void Creature::setCreatureLight(LightInfo light) {
internalLight = std::move(light);
}
void Creature::setNormalCreatureLight()
{
internalLight = {};
}
bool Creature::registerCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (hasEventRegistered(type)) {
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent == event) {
return false;
}
}
} else {
scriptEventsBitField |= static_cast<uint32_t>(1) << type;
}
eventsList.push_back(event);
return true;
}
bool Creature::unregisterCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (!hasEventRegistered(type)) {
return false;
}
bool resetTypeBit = true;
auto it = eventsList.begin(), end = eventsList.end();
while (it != end) {
CreatureEvent* curEvent = *it;
if (curEvent == event) {
it = eventsList.erase(it);
continue;
}
if (curEvent->getEventType() == type) {
resetTypeBit = false;
}
++it;
}
if (resetTypeBit) {
scriptEventsBitField &= ~(static_cast<uint32_t>(1) << type);
}
return true;
}
CreatureEventList Creature::getCreatureEvents(CreatureEventType_t type)
{
CreatureEventList tmpEventList;
if (!hasEventRegistered(type)) {
return tmpEventList;
}
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent->getEventType() == type) {
tmpEventList.push_back(creatureEvent);
}
}
return tmpEventList;
}
bool FrozenPathingConditionCall::isInRange(const Position& startPos, const Position& testPos,
const FindPathParams& fpp) const
{
if (fpp.fullPathSearch) {
if (testPos.x > targetPos.x + fpp.maxTargetDist) {
return false;
}
if (testPos.x < targetPos.x - fpp.maxTargetDist) {
return false;
}
if (testPos.y > targetPos.y + fpp.maxTargetDist) {
return false;
}
if (testPos.y < targetPos.y - fpp.maxTargetDist) {
return false;
}
} else {
int_fast32_t dx = Position::getOffsetX(startPos, targetPos);
int32_t dxMax = (dx >= 0 ? fpp.maxTargetDist : 0);
if (testPos.x > targetPos.x + dxMax) {
return false;
}
int32_t dxMin = (dx <= 0 ? fpp.maxTargetDist : 0);
if (testPos.x < targetPos.x - dxMin) {
return false;
}
int_fast32_t dy = Position::getOffsetY(startPos, targetPos);
int32_t dyMax = (dy >= 0 ? fpp.maxTargetDist : 0);
if (testPos.y > targetPos.y + dyMax) {
return false;
}
int32_t dyMin = (dy <= 0 ? fpp.maxTargetDist : 0);
if (testPos.y < targetPos.y - dyMin) {
return false;
}
}
return true;
}
bool FrozenPathingConditionCall::operator()(const Position& startPos, const Position& testPos,
const FindPathParams& fpp, int32_t& bestMatchDist) const
{
if (!isInRange(startPos, testPos, fpp)) {
return false;
}
if (fpp.clearSight && !g_game.isSightClear(testPos, targetPos, true)) {
return false;
}
int32_t testDist = std::max<int32_t>(Position::getDistanceX(targetPos, testPos), Position::getDistanceY(targetPos, testPos));
if (fpp.maxTargetDist == 1) {
if (testDist < fpp.minTargetDist || testDist > fpp.maxTargetDist) {
return false;
}
return true;
} else if (testDist <= fpp.maxTargetDist) {
if (testDist < fpp.minTargetDist) {
return false;
}
if (testDist == fpp.maxTargetDist) {
bestMatchDist = 0;
return true;
} else if (testDist > bestMatchDist) {
//not quite what we want, but the best so far
bestMatchDist = testDist;
return true;
}
}
return false;
}
bool Creature::isInvisible() const
{
return std::find_if(conditions.begin(), conditions.end(), [] (const Condition* condition) {
return condition->getType() == CONDITION_INVISIBLE;
}) != conditions.end();
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, const FindPathParams& fpp) const
{
return g_game.map.getPathMatching(*this, dirList, FrozenPathingConditionCall(targetPos), fpp);
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, int32_t minTargetDist, int32_t maxTargetDist, bool fullPathSearch /*= true*/, bool clearSight /*= true*/, int32_t maxSearchDist /*= 0*/) const
{
FindPathParams fpp;
fpp.fullPathSearch = fullPathSearch;
fpp.maxSearchDist = maxSearchDist;
fpp.clearSight = clearSight;
fpp.minTargetDist = minTargetDist;
fpp.maxTargetDist = maxTargetDist;
return getPathTo(targetPos, dirList, fpp);
}
| 1 | 15,104 | This one could be replaced with a `std::max`, no? | otland-forgottenserver | cpp |
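The review note above suggests a `std::max`. The hunk it targets is not reproduced in this record, so which line it means is an assumption on my part; the clamp in `Creature::getStepDuration`, where `calculatedStepSpeed` is forced to at least 1, is the kind of spot the remark fits. A minimal standalone sketch of that shape — `calculateStepSpeed` and the speed constants are placeholders, not the project's actual API:

```cpp
#include <algorithm> // std::max
#include <cmath>     // std::floor, std::log
#include <cstdint>
#include <iostream>

// Placeholder speed constants standing in for Creature::speedA/speedB/speedC.
static const double speedA = 857.36;
static const double speedB = 261.29;
static const double speedC = -4795.01;

// Same formula as getStepDuration(), but the "force to at least 1" branch
// is written as a single std::max instead of an if statement.
uint32_t calculateStepSpeed(int32_t stepSpeed)
{
    if (stepSpeed <= -speedB) {
        return 1;
    }
    const double raw = std::floor((speedA * std::log((stepSpeed / 2) + speedB) + speedC) + 0.5);
    return static_cast<uint32_t>(std::max<int64_t>(1, static_cast<int64_t>(raw)));
}

int main()
{
    std::cout << calculateStepSpeed(220) << '\n'; // a normal walking speed
    std::cout << calculateStepSpeed(-10) << '\n'; // clamped to 1 by std::max
    return 0;
}
```

Behaviour is unchanged; the clamp simply reads as one expression instead of a check-and-assign.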
@@ -87,7 +87,10 @@ module Selenium
return unless File.exist?(manifest_path)
manifest = JSON.parse(File.read(manifest_path))
- [manifest['name'].delete(' '), manifest['version']].join('@')
+ id = if manifest.key?('application') && manifest['application'].key?('gecko')
+ manifest['application']['gecko']['id']
+ end
+ id || [manifest['name'].delete(' '), manifest['version']].join('@')
end
end # Extension
end # Firefox | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Firefox
#
# @api private
#
class Extension
NAMESPACE = 'http://www.mozilla.org/2004/em-rdf#'.freeze
def initialize(path)
unless File.exist?(path)
raise Error::WebDriverError, "could not find extension at #{path.inspect}"
end
@path = path
@should_reap_root = false
end
def write_to(extensions_dir)
root_dir = create_root
ext_path = File.join extensions_dir, read_id(root_dir)
FileUtils.rm_rf ext_path
FileUtils.mkdir_p File.dirname(ext_path), mode: 0o700
FileUtils.cp_r root_dir, ext_path
FileReaper.reap(root_dir) if @should_reap_root
end
private
def create_root
if File.directory? @path
@path
else
unless Zipper::EXTENSIONS.include? File.extname(@path)
raise Error::WebDriverError, "expected #{Zipper::EXTENSIONS.join(' or ')}, got #{@path.inspect}"
end
@should_reap_root = true
Zipper.unzip(@path)
end
end
def read_id(directory)
read_id_from_install_rdf(directory) || read_id_from_manifest_json(directory)
end
def read_id_from_install_rdf(directory)
rdf_path = File.join(directory, 'install.rdf')
return unless File.exist?(rdf_path)
doc = REXML::Document.new(File.read(rdf_path))
namespace = doc.root.namespaces.key(NAMESPACE)
if namespace
id_node = REXML::XPath.first(doc, "//#{namespace}:id")
return id_node.text if id_node
attr_node = REXML::XPath.first(doc, "//@#{namespace}:id")
return attr_node.value if attr_node
end
raise Error::WebDriverError, "cannot locate extension id in #{rdf_path}"
end
def read_id_from_manifest_json(directory)
manifest_path = File.join(directory, 'manifest.json')
return unless File.exist?(manifest_path)
manifest = JSON.parse(File.read(manifest_path))
[manifest['name'].delete(' '), manifest['version']].join('@')
end
end # Extension
end # Firefox
end # WebDriver
end # Selenium
| 1 | 15,944 | Couldn't you just write this as an if/else or a guard clause like on line 87? Just seems a bit weird doing this conditional assignment for essentially an if/else. | SeleniumHQ-selenium | java |
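The comment above asks whether the `read_id_from_manifest_json` change could be a plain if/else or a guard clause instead of the conditional assignment. One way that could look — strictly my reading of the suggestion, not the change that was eventually merged:

```ruby
require 'json'

# Hypothetical if/else form of read_id_from_manifest_json from the patch above.
def read_id_from_manifest_json(directory)
  manifest_path = File.join(directory, 'manifest.json')
  return unless File.exist?(manifest_path)

  manifest = JSON.parse(File.read(manifest_path))
  if manifest.key?('application') && manifest['application'].key?('gecko')
    manifest['application']['gecko']['id']
  else
    [manifest['name'].delete(' '), manifest['version']].join('@')
  end
end
```

The guard clause on the `File.exist?` check stays as in the original; only the id lookup changes shape.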
@@ -566,7 +566,11 @@ class Filter extends WidgetBase
/*
* Set scope value
*/
- $scope->value = $this->getScopeValue($scope);
+ if($scopeType=='checkbox' || $scopeType=='switch'){
+ $scope->value = isset($config['value']) ? $config['value'] : $this->getScopeValue($scope);
+ } else{
+ $scope->value = $this->getScopeValue($scope);
+ }
return $scope;
} | 1 | <?php namespace Backend\Widgets;
use Db;
use Str;
use Lang;
use Backend;
use DbDongle;
use Carbon\Carbon;
use Backend\Classes\WidgetBase;
use Backend\Classes\FilterScope;
use ApplicationException;
/**
* Filter Widget
* Renders a container used for filtering things.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class Filter extends WidgetBase
{
//
// Configurable properties
//
/**
* @var array Scope definition configuration.
*/
public $scopes;
/**
* @var string The context of this filter, scopes that do not belong
* to this context will not be shown.
*/
public $context = null;
//
// Object properties
//
/**
* @inheritDoc
*/
protected $defaultAlias = 'filter';
/**
* @var boolean Determines if scope definitions have been created.
*/
protected $scopesDefined = false;
/**
* @var array Collection of all scopes used in this filter.
*/
protected $allScopes = [];
/**
* @var array Collection of all scopes models used in this filter.
*/
protected $scopeModels = [];
/**
* @var array List of CSS classes to apply to the filter container element
*/
public $cssClasses = [];
/**
* Initialize the widget, called by the constructor and free from its parameters.
*/
public function init()
{
$this->fillFromConfig([
'scopes',
'context',
]);
}
/**
* Renders the widget.
*/
public function render()
{
$this->prepareVars();
return $this->makePartial('filter');
}
/**
* Prepares the view data
*/
public function prepareVars()
{
$this->defineFilterScopes();
$this->vars['cssClasses'] = implode(' ', $this->cssClasses);
$this->vars['scopes'] = $this->allScopes;
}
/**
* Renders the HTML element for a scope
*/
public function renderScopeElement($scope)
{
$params = ['scope' => $scope];
switch ($scope->type) {
case 'date':
if ($scope->value && $scope->value instanceof Carbon) {
$params['dateStr'] = Backend::dateTime($scope->value, ['formatAlias' => 'dateMin']);
$params['date'] = $scope->value->format('Y-m-d H:i:s');
}
break;
case 'daterange':
if ($scope->value && is_array($scope->value) && count($scope->value) === 2 &&
$scope->value[0] && $scope->value[0] instanceof Carbon &&
$scope->value[1] && $scope->value[1] instanceof Carbon
) {
$after = $scope->value[0]->format('Y-m-d H:i:s');
$before = $scope->value[1]->format('Y-m-d H:i:s');
if(strcasecmp($after, '0000-00-00 00:00:00') > 0) {
$params['afterStr'] = Backend::dateTime($scope->value[0], ['formatAlias' => 'dateMin']);
$params['after'] = $after;
}
else {
$params['afterStr'] = '∞';
$params['after'] = null;
}
if(strcasecmp($before, '2999-12-31 23:59:59') < 0) {
$params['beforeStr'] = Backend::dateTime($scope->value[1], ['formatAlias' => 'dateMin']);
$params['before'] = $before;
}
else {
$params['beforeStr'] = '∞';
$params['before'] = null;
}
}
break;
case 'number':
if (is_numeric($scope->value)) {
$params['number'] = $scope->value;
}
break;
case 'numberrange':
if ($scope->value && is_array($scope->value) && count($scope->value) === 2 &&
$scope->value[0] &&
$scope->value[1]
) {
$min = $scope->value[0];
$max = $scope->value[1];
$params['minStr'] = $min ? $min : '';
$params['min'] = $min ? $min : null;
$params['maxStr'] = $max ? $max : '∞';
$params['max'] = $max ? $max : null;
}
break;
}
return $this->makePartial('scope_'.$scope->type, $params);
}
//
// AJAX
//
/**
* Update a filter scope value.
* @return array
*/
public function onFilterUpdate()
{
$this->defineFilterScopes();
if (!$scope = post('scopeName')) {
return;
}
$scope = $this->getScope($scope);
switch ($scope->type) {
case 'group':
$active = $this->optionsFromAjax(post('options.active'));
$this->setScopeValue($scope, $active);
break;
case 'checkbox':
$checked = post('value') == 'true' ? true : false;
$this->setScopeValue($scope, $checked);
break;
case 'switch':
$value = post('value');
$this->setScopeValue($scope, $value);
break;
case 'date':
$dates = $this->datesFromAjax(post('options.dates'));
if (!empty($dates)) {
list($date) = $dates;
}
else {
$date = null;
}
$this->setScopeValue($scope, $date);
break;
case 'daterange':
$dates = $this->datesFromAjax(post('options.dates'));
if (!empty($dates)) {
list($after, $before) = $dates;
$dates = [$after, $before];
}
else {
$dates = null;
}
$this->setScopeValue($scope, $dates);
break;
case 'number':
$numbers = $this->numbersFromAjax(post('options.numbers'));
if (!empty($numbers)) {
list($number) = $numbers;
}
else {
$number = null;
}
$this->setScopeValue($scope, $number);
break;
case 'numberrange':
$numbers = $this->numbersFromAjax(post('options.numbers'));
if (!empty($numbers)) {
list($min, $max) = $numbers;
$numbers = [$min, $max];
}
else {
$numbers = null;
}
$this->setScopeValue($scope, $numbers);
break;
}
/*
* Trigger class event, merge results as viewable array
*/
$params = func_get_args();
$result = $this->fireEvent('filter.update', [$params]);
if ($result && is_array($result)) {
return call_user_func_array('array_merge', $result);
}
}
/**
* Returns available options for group scope type.
* @return array
*/
public function onFilterGetOptions()
{
$this->defineFilterScopes();
$searchQuery = post('search');
if (!$scopeName = post('scopeName')) {
return;
}
$scope = $this->getScope($scopeName);
$activeKeys = $scope->value ? array_keys($scope->value) : [];
$available = $this->getAvailableOptions($scope, $searchQuery);
$active = $searchQuery ? [] : $this->filterActiveOptions($activeKeys, $available);
return [
'scopeName' => $scopeName,
'options' => [
'available' => $this->optionsToAjax($available),
'active' => $this->optionsToAjax($active),
]
];
}
//
// Internals
//
/**
* Returns the available options a scope can use, either from the
* model relation or from a supplied array. Optionally apply a search
* constraint to the options.
* @param string $scope
* @param string $searchQuery
* @return array
*/
protected function getAvailableOptions($scope, $searchQuery = null)
{
if ($scope->options) {
return $this->getOptionsFromArray($scope, $searchQuery);
}
$available = [];
$nameColumn = $this->getScopeNameFrom($scope);
$options = $this->getOptionsFromModel($scope, $searchQuery);
foreach ($options as $option) {
$available[$option->getKey()] = $option->{$nameColumn};
}
return $available;
}
/**
* Removes any already selected options from the available options, returns
* a newly built array.
* @param array $activeKeys
* @param array $availableOptions
* @return array
*/
protected function filterActiveOptions(array $activeKeys, array &$availableOptions)
{
$active = [];
foreach ($availableOptions as $id => $option) {
if (!in_array($id, $activeKeys)) {
continue;
}
$active[$id] = $option;
unset($availableOptions[$id]);
}
return $active;
}
/**
* Looks at the model for defined scope items.
* @return Collection
*/
protected function getOptionsFromModel($scope, $searchQuery = null)
{
$model = $this->scopeModels[$scope->scopeName];
$query = $model->newQuery();
/*
* Extensibility
*/
$this->fireSystemEvent('backend.filter.extendQuery', [$query, $scope]);
if (!$searchQuery) {
return $query->get();
}
$searchFields = [$model->getKeyName(), $this->getScopeNameFrom($scope)];
return $query->searchWhere($searchQuery, $searchFields)->get();
}
/**
* Look at the defined set of options for scope items, or the model method.
* @return array
*/
protected function getOptionsFromArray($scope, $searchQuery = null)
{
/*
* Load the data
*/
$options = $scope->options;
if (is_scalar($options)) {
$model = $this->scopeModels[$scope->scopeName];
$methodName = $options;
if (!$model->methodExists($methodName)) {
throw new ApplicationException(Lang::get('backend::lang.filter.options_method_not_exists', [
'model' => get_class($model),
'method' => $methodName,
'filter' => $scope->scopeName
]));
}
$options = $model->$methodName();
}
elseif (!is_array($options)) {
$options = [];
}
/*
* Apply the search
*/
$searchQuery = Str::lower($searchQuery);
if (strlen($searchQuery)) {
$options = $this->filterOptionsBySearch($options, $searchQuery);
}
return $options;
}
/**
* Filters an array of options by a search term.
* @param array $options
* @param string $query
* @return array
*/
protected function filterOptionsBySearch($options, $query)
{
$filteredOptions = [];
$optionMatchesSearch = function ($words, $option) {
foreach ($words as $word) {
$word = trim($word);
if (!strlen($word)) {
continue;
}
if (!Str::contains(Str::lower($option), $word)) {
return false;
}
}
return true;
};
/*
* Exact
*/
foreach ($options as $index => $option) {
if (Str::is(Str::lower($option), $query)) {
$filteredOptions[$index] = $option;
unset($options[$index]);
}
}
/*
* Fuzzy
*/
$words = explode(' ', $query);
foreach ($options as $index => $option) {
if ($optionMatchesSearch($words, $option)) {
$filteredOptions[$index] = $option;
}
}
return $filteredOptions;
}
/**
* Creates a flat array of filter scopes from the configuration.
*/
protected function defineFilterScopes()
{
if ($this->scopesDefined) {
return;
}
/*
* Extensibility
*/
$this->fireSystemEvent('backend.filter.extendScopesBefore');
/*
* All scopes
*/
if (!isset($this->scopes) || !is_array($this->scopes)) {
$this->scopes = [];
}
$this->addScopes($this->scopes);
/*
* Extensibility
*/
$this->fireSystemEvent('backend.filter.extendScopes');
$this->scopesDefined = true;
}
/**
     * Programmatically add scopes, used internally and for extensibility.
*/
public function addScopes(array $scopes)
{
foreach ($scopes as $name => $config) {
$scopeObj = $this->makeFilterScope($name, $config);
/*
* Check that the filter scope matches the active context
*/
if ($scopeObj->context !== null) {
$context = (is_array($scopeObj->context)) ? $scopeObj->context : [$scopeObj->context];
if (!in_array($this->getContext(), $context)) {
continue;
}
}
/*
* Validate scope model
*/
if (isset($config['modelClass'])) {
$class = $config['modelClass'];
$model = new $class;
$this->scopeModels[$name] = $model;
}
/*
* Ensure scope type options are set
*/
$scopeProperties = [];
switch ($scopeObj->type) {
case 'date':
case 'daterange':
$scopeProperties = [
'minDate' => '2000-01-01',
'maxDate' => '2099-12-31',
'firstDay' => 0,
'yearRange' => 10,
];
break;
}
foreach ($scopeProperties as $property => $value) {
if (isset($config[$property])) {
$value = $config[$property];
}
$scopeObj->{$property} = $value;
}
$this->allScopes[$name] = $scopeObj;
}
}
/**
     * Programmatically remove a scope, used for extensibility.
* @param string $scopeName Scope name
*/
public function removeScope($scopeName)
{
if (isset($this->allScopes[$scopeName])) {
unset($this->allScopes[$scopeName]);
}
}
/**
* Creates a filter scope object from name and configuration.
*/
protected function makeFilterScope($name, $config)
{
$label = (isset($config['label'])) ? $config['label'] : null;
$scopeType = isset($config['type']) ? $config['type'] : null;
$scope = new FilterScope($name, $label);
$scope->displayAs($scopeType, $config);
/*
* Set scope value
*/
$scope->value = $this->getScopeValue($scope);
return $scope;
}
//
// Filter query logic
//
/**
* Applies all scopes to a DB query.
* @param Builder $query
* @return Builder
*/
public function applyAllScopesToQuery($query)
{
$this->defineFilterScopes();
foreach ($this->allScopes as $scope) {
$this->applyScopeToQuery($scope, $query);
}
return $query;
}
/**
* Applies a filter scope constraints to a DB query.
* @param string $scope
* @param Builder $query
* @return Builder
*/
public function applyScopeToQuery($scope, $query)
{
if (is_string($scope)) {
$scope = $this->getScope($scope);
}
if (!$scope->value) {
return;
}
switch ($scope->type) {
case 'date':
if ($scope->value instanceof Carbon) {
$value = $scope->value;
/*
* Condition
*/
if ($scopeConditions = $scope->conditions) {
$query->whereRaw(DbDongle::parse(strtr($scopeConditions, [
':filtered' => $value->format('Y-m-d'),
':after' => $value->format('Y-m-d H:i:s'),
':before' => $value->copy()->addDay()->addMinutes(-1)->format('Y-m-d H:i:s')
])));
}
/*
* Scope
*/
elseif ($scopeMethod = $scope->scope) {
$query->$scopeMethod($value);
}
}
break;
case 'daterange':
if (is_array($scope->value) && count($scope->value) > 1) {
list($after, $before) = array_values($scope->value);
if ($after && $after instanceof Carbon && $before && $before instanceof Carbon) {
/*
* Condition
*/
if ($scopeConditions = $scope->conditions) {
$query->whereRaw(DbDongle::parse(strtr($scopeConditions, [
':afterDate' => $after->format('Y-m-d'),
':after' => $after->format('Y-m-d H:i:s'),
':beforeDate' => $before->format('Y-m-d'),
':before' => $before->format('Y-m-d H:i:s')
])));
}
/*
* Scope
*/
elseif ($scopeMethod = $scope->scope) {
$query->$scopeMethod($after, $before);
}
}
}
break;
case 'number':
if (is_numeric($scope->value)) {
/*
* Condition
*/
if ($scopeConditions = $scope->conditions) {
$query->whereRaw(DbDongle::parse(strtr($scopeConditions, [
':filtered' => $scope->value,
])));
}
/*
* Scope
*/
elseif ($scopeMethod = $scope->scope) {
$query->$scopeMethod($scope->value);
}
                }
                break;
case 'numberrange':
if (is_array($scope->value) && count($scope->value) > 1) {
list($min, $max) = array_values($scope->value);
if ($min && $max) {
                        /*
                         * Condition
                         */
if ($scopeConditions = $scope->conditions) {
$query->whereRaw(DbDongle::parse(strtr($scopeConditions, [
':min' => $min,
':max' => $max
])));
}
/*
* Scope
*/
elseif ($scopeMethod = $scope->scope) {
$query->$scopeMethod($min, $max);
}
}
}
break;
default:
$value = is_array($scope->value) ? array_keys($scope->value) : $scope->value;
/*
* Condition
*/
if ($scopeConditions = $scope->conditions) {
/*
* Switch scope: multiple conditions, value either 1 or 2
*/
if (is_array($scopeConditions)) {
$conditionNum = is_array($value) ? 0 : $value - 1;
list($scopeConditions) = array_slice($scopeConditions, $conditionNum);
}
if (is_array($value)) {
$filtered = implode(',', array_build($value, function ($key, $_value) {
return [$key, Db::getPdo()->quote($_value)];
}));
}
else {
$filtered = Db::getPdo()->quote($value);
}
$query->whereRaw(DbDongle::parse(strtr($scopeConditions, [':filtered' => $filtered])));
}
/*
* Scope
*/
elseif ($scopeMethod = $scope->scope) {
$query->$scopeMethod($value);
}
break;
}
return $query;
}
//
// Access layer
//
/**
* Returns a scope value for this widget instance.
*/
public function getScopeValue($scope, $default = null)
{
if (is_string($scope)) {
$scope = $this->getScope($scope);
}
$cacheKey = 'scope-'.$scope->scopeName;
return $this->getSession($cacheKey, $default);
}
/**
* Sets an scope value for this widget instance.
*/
public function setScopeValue($scope, $value)
{
if (is_string($scope)) {
$scope = $this->getScope($scope);
}
$cacheKey = 'scope-'.$scope->scopeName;
$this->putSession($cacheKey, $value);
$scope->value = $value;
}
/**
* Get all the registered scopes for the instance.
* @return array
*/
public function getScopes()
{
return $this->allScopes;
}
/**
* Get a specified scope object
* @param string $scope
* @return mixed
*/
public function getScope($scope)
{
if (!isset($this->allScopes[$scope])) {
throw new ApplicationException('No definition for scope ' . $scope);
}
return $this->allScopes[$scope];
}
/**
* Returns the display name column for a scope.
* @param string $scope
* @return string
*/
public function getScopeNameFrom($scope)
{
if (is_string($scope)) {
$scope = $this->getScope($scope);
}
return $scope->nameFrom;
}
/**
* Returns the active context for displaying the filter.
* @return string
*/
public function getContext()
{
return $this->context;
}
//
// Helpers
//
/**
* Convert a key/pair array to a named array {id: 1, name: 'Foobar'}
* @param array $options
* @return array
*/
protected function optionsToAjax($options)
{
$processed = [];
foreach ($options as $id => $result) {
$processed[] = ['id' => $id, 'name' => $result];
}
return $processed;
}
/**
* Convert a named array to a key/pair array
* @param array $options
* @return array
*/
protected function optionsFromAjax($options)
{
$processed = [];
if (!is_array($options)) {
return $processed;
}
foreach ($options as $option) {
$id = array_get($option, 'id');
if ($id === null) {
continue;
}
$processed[$id] = array_get($option, 'name');
}
return $processed;
}
/**
* Convert an array from the posted dates
*
     * @param array $ajaxDates
*
* @return array
*/
protected function datesFromAjax($ajaxDates)
{
$dates = [];
$dateRegex = '/\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}/';
if (null !== $ajaxDates) {
if (!is_array($ajaxDates)) {
if(preg_match($dateRegex, $ajaxDates)) {
$dates = [$ajaxDates];
}
} else {
foreach ($ajaxDates as $i => $date) {
if (preg_match($dateRegex, $date)) {
$dates[] = Carbon::createFromFormat('Y-m-d H:i:s', $date);
} elseif (empty($date)) {
if($i == 0) {
$dates[] = Carbon::createFromFormat('Y-m-d H:i:s', '0000-00-00 00:00:00');
} else {
$dates[] = Carbon::createFromFormat('Y-m-d H:i:s', '2999-12-31 23:59:59');
}
} else {
$dates = [];
break;
}
}
}
}
return $dates;
}
/**
* Convert an array from the posted numbers
*
     * @param array $ajaxNumbers
*
* @return array
*/
protected function numbersFromAjax($ajaxNumbers)
{
$numbers = [];
$numberRegex = '/\d/';
if (!empty($ajaxNumbers)) {
if (!is_array($ajaxNumbers) && preg_match($numberRegex, $ajaxNumbers)) {
$numbers = [$ajaxNumbers];
} else {
foreach ($ajaxNumbers as $i => $number) {
if (preg_match($numberRegex, $number)) {
$numbers[] = $number;
} else {
$numbers = [];
break;
}
}
}
}
return $numbers;
}
/**
* @param mixed $scope
*
* @return string
*/
protected function getFilterDateFormat($scope)
{
if (isset($scope->date_format)) {
return $scope->date_format;
}
return trans('backend::lang.filter.date.format');
}
}
| 1 | 12,658 | Please use strict type comparisons (`===`), not loose type comparisons. | octobercms-october | php |
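The review asks for strict comparisons in the hunk above. A standalone sketch of the same branch written with `===`; `resolveScopeValue`, its parameters and the fallback value are stand-ins for the widget's real state, so this shows the comparison style rather than the merged fix:

```php
<?php

// Strict-comparison (===) version of the added checkbox/switch branch.
function resolveScopeValue(string $scopeType, array $config, $fallback)
{
    if ($scopeType === 'checkbox' || $scopeType === 'switch') {
        return isset($config['value']) ? $config['value'] : $fallback;
    }

    return $fallback;
}

var_dump(resolveScopeValue('checkbox', ['value' => true], null)); // bool(true)
var_dump(resolveScopeValue('group', [], 'session-value'));        // string(13) "session-value"
```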
@@ -73,6 +73,14 @@ public class FilterParameter {
return blockhash;
}
+ public boolean isValid() {
+ if (!getFromBlock().isLatest() && !getToBlock().isLatest() && getBlockhash() != null) {
+ return false;
+ }
+
+ return true;
+ }
+
@Override
public String toString() {
return MoreObjects.toStringHelper(this) | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.parameters;
import static java.util.Collections.emptyList;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.LogTopic;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.google.common.base.MoreObjects;
public class FilterParameter {
private final BlockParameter fromBlock;
private final BlockParameter toBlock;
private final List<Address> addresses;
private final List<List<LogTopic>> topics;
private final Hash blockhash;
@JsonCreator
public FilterParameter(
@JsonProperty("fromBlock") final String fromBlock,
@JsonProperty("toBlock") final String toBlock,
@JsonFormat(with = JsonFormat.Feature.ACCEPT_SINGLE_VALUE_AS_ARRAY) @JsonProperty("address")
final List<Address> address,
@JsonDeserialize(using = TopicsDeserializer.class) @JsonProperty("topics")
final List<List<LogTopic>> topics,
@JsonProperty("blockhash") final String blockhash) {
this.fromBlock =
fromBlock != null ? new BlockParameter(fromBlock) : new BlockParameter("latest");
this.toBlock = toBlock != null ? new BlockParameter(toBlock) : new BlockParameter("latest");
this.addresses = address != null ? address : emptyList();
this.topics = topics != null ? topics : emptyList();
this.blockhash = blockhash != null ? Hash.fromHexString(blockhash) : null;
}
public BlockParameter getFromBlock() {
return fromBlock;
}
public BlockParameter getToBlock() {
return toBlock;
}
public List<Address> getAddresses() {
return addresses;
}
public List<List<LogTopic>> getTopics() {
return topics;
}
public Hash getBlockhash() {
return blockhash;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("fromBlock", fromBlock)
.add("toBlock", toBlock)
.add("addresses", addresses)
.add("topics", topics)
.add("blockhash", blockhash)
.toString();
}
}
| 1 | 22,134 | The filter parameters are a bit confusing. I understand that one of them has to be set, but what takes precedence if a from/to is set and the hash as well? Should that fail? | hyperledger-besu | java |
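The reviewer's question is about precedence when a block range and a `blockhash` are both present. A standalone illustration of the rule exactly as the new `isValid()` states it — simplified strings instead of the real `BlockParameter`/`Hash` types — which also surfaces the mixed case the reviewer seems worried about:

```java
import java.util.Objects;

// Simplified model of FilterParameter.isValid() from the diff above:
// invalid only when BOTH bounds are explicitly non-"latest" AND a blockhash is set.
public class FilterParameterRuleDemo {

    static boolean isValid(String fromBlock, String toBlock, String blockhash) {
        boolean fromLatest = Objects.equals(fromBlock, "latest");
        boolean toLatest = Objects.equals(toBlock, "latest");
        return !(!fromLatest && !toLatest && blockhash != null);
    }

    public static void main(String[] args) {
        System.out.println(isValid("latest", "latest", "0xabc")); // true: hash with defaulted range
        System.out.println(isValid("0x1", "0x2", null));          // true: range only
        System.out.println(isValid("0x1", "0x2", "0xabc"));       // false: explicit range plus hash
        System.out.println(isValid("0x1", "latest", "0xabc"));    // true: partial range plus hash still passes
    }
}
```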
@@ -6,6 +6,9 @@
#include "graph/context/Iterator.h"
+#include <cstdio>
+#include <tuple>
+
#include "common/datatypes/Edge.h"
#include "common/datatypes/Vertex.h"
#include "graph/util/SchemaUtil.h" | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "graph/context/Iterator.h"
#include "common/datatypes/Edge.h"
#include "common/datatypes/Vertex.h"
#include "graph/util/SchemaUtil.h"
#include "interface/gen-cpp2/common_types.h"
namespace nebula {
namespace graph {
GetNeighborsIter::GetNeighborsIter(std::shared_ptr<Value> value)
: Iterator(value, Kind::kGetNeighbors) {
if (value == nullptr) {
return;
}
auto status = processList(value);
if (UNLIKELY(!status.ok())) {
LOG(ERROR) << status;
clear();
return;
}
goToFirstEdge();
}
void GetNeighborsIter::goToFirstEdge() {
// Go to first edge
for (currentDs_ = dsIndices_.begin(); currentDs_ < dsIndices_.end(); ++currentDs_) {
if (noEdge_) {
currentRow_ = currentDs_->ds->rows.begin();
valid_ = true;
break;
}
for (currentRow_ = currentDs_->ds->rows.begin(); currentRow_ < currentDs_->ds->rows.end();
++currentRow_) {
colIdx_ = currentDs_->colLowerBound + 1;
while (colIdx_ < currentDs_->colUpperBound && !valid_) {
const auto& currentCol = currentRow_->operator[](colIdx_);
if (!currentCol.isList() || currentCol.getList().empty()) {
++colIdx_;
continue;
}
currentCol_ = ¤tCol.getList();
edgeIdxUpperBound_ = currentCol_->size();
edgeIdx_ = 0;
while (edgeIdx_ < edgeIdxUpperBound_ && !valid_) {
const auto& currentEdge = currentCol_->operator[](edgeIdx_);
if (!currentEdge.isList()) {
++edgeIdx_;
continue;
}
currentEdge_ = ¤tEdge.getList();
valid_ = true;
} // `edgeIdx_'
if (!valid_) {
++colIdx_;
}
} // `colIdx_'
if (valid_) {
break;
}
} // `currentRow_'
if (valid_) {
break;
}
} // `currentDs_'
if (valid_) {
rowsUpperBound_ = currentDs_->ds->rows.end();
bitIdx_ = 0;
if (bitset_.empty()) {
bitset_.push_back(true);
} else if (!bitset_[bitIdx_]) {
next();
}
}
}
Status GetNeighborsIter::processList(std::shared_ptr<Value> value) {
if (UNLIKELY(!value->isList())) {
std::stringstream ss;
ss << "Value type is not list, type: " << value->type();
return Status::Error(ss.str());
}
for (auto& val : value->getList().values) {
if (UNLIKELY(!val.isDataSet())) {
return Status::Error("There is a value in list which is not a data set.");
}
auto status = makeDataSetIndex(val.getDataSet());
NG_RETURN_IF_ERROR(status);
dsIndices_.emplace_back(std::move(status).value());
}
return Status::OK();
}
StatusOr<GetNeighborsIter::DataSetIndex> GetNeighborsIter::makeDataSetIndex(const DataSet& ds) {
DataSetIndex dsIndex;
dsIndex.ds = &ds;
auto buildResult = buildIndex(&dsIndex);
NG_RETURN_IF_ERROR(buildResult);
return dsIndex;
}
bool checkColumnNames(const std::vector<std::string>& colNames) {
return colNames.size() < 3 || colNames[0] != nebula::kVid || colNames[1].find("_stats") != 0 ||
colNames.back().find("_expr") != 0;
}
StatusOr<int64_t> GetNeighborsIter::buildIndex(DataSetIndex* dsIndex) {
auto& colNames = dsIndex->ds->colNames;
if (UNLIKELY(checkColumnNames(colNames))) {
return Status::Error("Bad column names.");
}
int64_t edgeStartIndex = -1;
for (size_t i = 0; i < colNames.size(); ++i) {
dsIndex->colIndices.emplace(colNames[i], i);
auto& colName = colNames[i];
if (colName.find(nebula::kTag) == 0) { // "_tag"
NG_RETURN_IF_ERROR(buildPropIndex(colName, i, false, dsIndex));
} else if (colName.find("_edge") == 0) {
NG_RETURN_IF_ERROR(buildPropIndex(colName, i, true, dsIndex));
if (edgeStartIndex < 0) {
edgeStartIndex = i;
}
} else {
// It is "_vid", "_stats", "_expr" in this situation.
}
}
if (edgeStartIndex == -1) {
noEdge_ = true;
}
dsIndex->colLowerBound = edgeStartIndex - 1;
dsIndex->colUpperBound = colNames.size() - 1;
return edgeStartIndex;
}
Status GetNeighborsIter::buildPropIndex(const std::string& props,
size_t columnId,
bool isEdge,
DataSetIndex* dsIndex) {
std::vector<std::string> pieces;
folly::split(":", props, pieces);
if (UNLIKELY(pieces.size() < 2)) {
return Status::Error("Bad column name format: %s", props.c_str());
}
PropIndex propIdx;
// if size == 2, it is the tag defined without props.
if (pieces.size() > 2) {
for (size_t i = 2; i < pieces.size(); ++i) {
propIdx.propIndices.emplace(pieces[i], i - 2);
}
}
propIdx.colIdx = columnId;
propIdx.propList.resize(pieces.size() - 2);
std::move(pieces.begin() + 2, pieces.end(), propIdx.propList.begin());
std::string name = pieces[1];
if (isEdge) {
// The first character of the edge name is +/-.
if (UNLIKELY(name.empty() || (name[0] != '+' && name[0] != '-'))) {
return Status::Error("Bad edge name: %s", name.c_str());
}
dsIndex->tagEdgeNameIndices.emplace(columnId, name);
dsIndex->edgePropsMap.emplace(name, std::move(propIdx));
} else {
dsIndex->tagEdgeNameIndices.emplace(columnId, name);
dsIndex->tagPropsMap.emplace(name, std::move(propIdx));
}
return Status::OK();
}
bool GetNeighborsIter::valid() const {
return valid_ && currentDs_ < dsIndices_.end() && currentRow_ < rowsUpperBound_ &&
colIdx_ < currentDs_->colUpperBound;
}
void GetNeighborsIter::next() {
if (!valid()) {
return;
}
if (noEdge_) {
if (++currentRow_ < rowsUpperBound_) {
return;
}
// go to next dataset
if (++currentDs_ < dsIndices_.end()) {
currentRow_ = currentDs_->ds->begin();
rowsUpperBound_ = currentDs_->ds->end();
}
return;
}
while (++edgeIdx_ > -1) {
if (edgeIdx_ < edgeIdxUpperBound_) {
const auto& currentEdge = currentCol_->operator[](edgeIdx_);
if (!currentEdge.isList()) {
continue;
}
++bitIdx_;
DCHECK_GT(bitIdx_, -1);
if (static_cast<size_t>(bitIdx_) >= bitset_.size()) {
bitset_.push_back(true);
} else if (!bitset_[bitIdx_]) {
VLOG(1) << "Filtered: " << currentEdge << " bitidx: " << bitIdx_;
// current edge had been filtered.
continue;
}
currentEdge_ = ¤tEdge.getList();
break;
}
// go to next column
while (++colIdx_) {
if (colIdx_ < currentDs_->colUpperBound) {
const auto& currentCol = currentRow_->operator[](colIdx_);
if (!currentCol.isList() || currentCol.getList().empty()) {
continue;
}
currentCol_ = ¤tCol.getList();
edgeIdxUpperBound_ = currentCol_->size();
edgeIdx_ = -1;
break;
}
// go to next row
if (++currentRow_ < rowsUpperBound_) {
colIdx_ = currentDs_->colLowerBound;
continue;
}
// go to next dataset
if (++currentDs_ < dsIndices_.end()) {
colIdx_ = currentDs_->colLowerBound;
currentRow_ = currentDs_->ds->begin();
rowsUpperBound_ = currentDs_->ds->end();
continue;
}
break;
}
if (currentDs_ == dsIndices_.end()) {
break;
}
}
}
void GetNeighborsIter::erase() {
DCHECK_GE(bitIdx_, 0);
DCHECK_LT(bitIdx_, bitset_.size());
bitset_[bitIdx_] = false;
next();
}
const Value& GetNeighborsIter::getColumn(const std::string& col) const {
if (!valid()) {
return Value::kNullValue;
}
auto& index = currentDs_->colIndices;
auto found = index.find(col);
if (found == index.end()) {
return Value::kEmpty;
}
return currentRow_->values[found->second];
}
const Value& GetNeighborsIter::getColumn(int32_t index) const {
DCHECK_LT(index, currentRow_->values.size());
return currentRow_->values[index];
}
const Value& GetNeighborsIter::getTagProp(const std::string& tag, const std::string& prop) const {
if (!valid()) {
return Value::kNullValue;
}
auto& tagPropIndices = currentDs_->tagPropsMap;
auto index = tagPropIndices.find(tag);
if (index == tagPropIndices.end()) {
return Value::kEmpty;
}
auto propIndex = index->second.propIndices.find(prop);
if (propIndex == index->second.propIndices.end()) {
return Value::kEmpty;
}
auto colId = index->second.colIdx;
auto& row = *currentRow_;
DCHECK_GT(row.size(), colId);
if (row[colId].empty()) {
return Value::kEmpty;
}
if (!row[colId].isList()) {
return Value::kNullBadType;
}
auto& list = row[colId].getList();
return list.values[propIndex->second];
}
const Value& GetNeighborsIter::getEdgeProp(const std::string& edge, const std::string& prop) const {
if (!valid()) {
return Value::kNullValue;
}
if (noEdge_) {
return Value::kEmpty;
}
auto& currentEdge = currentEdgeName();
if (edge != "*" && (currentEdge.compare(1, std::string::npos, edge) != 0)) {
VLOG(1) << "Current edge: " << currentEdgeName() << " Wanted: " << edge;
return Value::kEmpty;
}
auto index = currentDs_->edgePropsMap.find(currentEdge);
if (index == currentDs_->edgePropsMap.end()) {
VLOG(1) << "No edge found: " << edge;
VLOG(1) << "Current edge: " << currentEdge;
return Value::kEmpty;
}
auto propIndex = index->second.propIndices.find(prop);
if (propIndex == index->second.propIndices.end()) {
VLOG(1) << "No edge prop found: " << prop;
return Value::kEmpty;
}
return currentEdge_->values[propIndex->second];
}
Value GetNeighborsIter::getVertex() const {
if (!valid()) {
return Value::kNullValue;
}
auto vidVal = getColumn(nebula::kVid);
if (!SchemaUtil::isValidVid(vidVal)) {
return Value::kNullBadType;
}
Vertex vertex;
vertex.vid = vidVal;
auto& tagPropMap = currentDs_->tagPropsMap;
for (auto& tagProp : tagPropMap) {
auto& row = *currentRow_;
auto& tagPropNameList = tagProp.second.propList;
auto tagColId = tagProp.second.colIdx;
if (!row[tagColId].isList()) {
// Ignore the bad value.
continue;
}
DCHECK_GE(row.size(), tagColId);
auto& propList = row[tagColId].getList();
DCHECK_EQ(tagPropNameList.size(), propList.values.size());
Tag tag;
tag.name = tagProp.first;
for (size_t i = 0; i < propList.size(); ++i) {
tag.props.emplace(tagPropNameList[i], propList[i]);
}
vertex.tags.emplace_back(std::move(tag));
}
return Value(std::move(vertex));
}
List GetNeighborsIter::getVertices() {
List vertices;
vertices.values.reserve(size());
valid_ = true;
colIdx_ = -2;
for (currentDs_ = dsIndices_.begin(); currentDs_ < dsIndices_.end(); ++currentDs_) {
rowsUpperBound_ = currentDs_->ds->rows.end();
for (currentRow_ = currentDs_->ds->rows.begin(); currentRow_ < currentDs_->ds->rows.end();
++currentRow_) {
vertices.values.emplace_back(getVertex());
VLOG(1) << "vertex: " << getVertex() << " size: " << vertices.size();
}
}
reset();
return vertices;
}
Value GetNeighborsIter::getEdge() const {
if (!valid()) {
return Value::kNullValue;
}
if (noEdge_) {
return Value::kEmpty;
}
Edge edge;
auto edgeName = currentEdgeName().substr(1, std::string::npos);
edge.name = edgeName;
auto type = getEdgeProp(edgeName, kType);
if (!type.isInt()) {
return Value::kNullBadType;
}
edge.type = type.getInt();
auto& srcVal = getColumn(kVid);
if (!SchemaUtil::isValidVid(srcVal)) {
return Value::kNullBadType;
}
edge.src = srcVal;
auto& dstVal = getEdgeProp(edgeName, kDst);
if (!SchemaUtil::isValidVid(dstVal)) {
return Value::kNullBadType;
}
edge.dst = dstVal;
auto& rank = getEdgeProp(edgeName, kRank);
if (!rank.isInt()) {
return Value::kNullBadType;
}
edge.ranking = rank.getInt();
auto& edgePropMap = currentDs_->edgePropsMap;
auto edgeProp = edgePropMap.find(currentEdgeName());
if (edgeProp == edgePropMap.end()) {
return Value::kNullValue;
}
auto& edgeNamePropList = edgeProp->second.propList;
auto& propList = currentEdge_->values;
DCHECK_EQ(edgeNamePropList.size(), propList.size());
for (size_t i = 0; i < propList.size(); ++i) {
auto propName = edgeNamePropList[i];
if (propName == kSrc || propName == kDst || propName == kRank || propName == kType) {
continue;
}
edge.props.emplace(edgeNamePropList[i], propList[i]);
}
return Value(std::move(edge));
}
List GetNeighborsIter::getEdges() {
List edges;
edges.values.reserve(size());
for (; valid(); next()) {
auto edge = getEdge();
if (edge.isEdge()) {
const_cast<Edge&>(edge.getEdge()).format();
}
edges.values.emplace_back(std::move(edge));
}
reset();
return edges;
}
SequentialIter::SequentialIter(std::shared_ptr<Value> value) : Iterator(value, Kind::kSequential) {
DCHECK(value->isDataSet());
auto& ds = value->mutableDataSet();
iter_ = ds.rows.begin();
rows_ = &ds.rows;
for (size_t i = 0; i < ds.colNames.size(); ++i) {
colIndices_.emplace(ds.colNames[i], i);
}
}
SequentialIter::SequentialIter(std::unique_ptr<Iterator> left, std::unique_ptr<Iterator> right)
: Iterator(left->valuePtr(), Kind::kSequential) {
std::vector<std::unique_ptr<Iterator>> iterators;
iterators.emplace_back(std::move(left));
iterators.emplace_back(std::move(right));
init(std::move(iterators));
}
SequentialIter::SequentialIter(std::vector<std::unique_ptr<Iterator>> inputList)
: Iterator(inputList.front()->valuePtr(), Kind::kSequential) {
init(std::move(inputList));
}
void SequentialIter::init(std::vector<std::unique_ptr<Iterator>>&& iterators) {
DCHECK(!iterators.empty());
const auto& firstIter = iterators.front();
DCHECK(firstIter->isSequentialIter());
colIndices_ = static_cast<const SequentialIter*>(firstIter.get())->getColIndices();
DataSet ds;
for (auto& iter : iterators) {
DCHECK(iter->isSequentialIter());
auto inputIter = static_cast<SequentialIter*>(iter.get());
ds.rows.insert(ds.rows.end(),
std::make_move_iterator(inputIter->begin()),
std::make_move_iterator(inputIter->end()));
}
value_ = std::make_shared<Value>(std::move(ds));
rows_ = &value_->mutableDataSet().rows;
iter_ = rows_->begin();
}
bool SequentialIter::valid() const { return iter_ < rows_->end(); }
void SequentialIter::next() {
if (valid()) {
++iter_;
}
}
void SequentialIter::erase() { iter_ = rows_->erase(iter_); }
void SequentialIter::unstableErase() {
std::swap(rows_->back(), *iter_);
rows_->pop_back();
}
void SequentialIter::eraseRange(size_t first, size_t last) {
if (first >= last || first >= size()) {
return;
}
if (last > size()) {
rows_->erase(rows_->begin() + first, rows_->end());
} else {
rows_->erase(rows_->begin() + first, rows_->begin() + last);
}
reset();
}
void SequentialIter::doReset(size_t pos) {
DCHECK((pos == 0 && size() == 0) || (pos < size()));
iter_ = rows_->begin() + pos;
}
const Value& SequentialIter::getColumn(int32_t index) const {
return getColumnByIndex(index, iter_);
}
PropIter::PropIter(std::shared_ptr<Value> value) : SequentialIter(value) {
DCHECK(value->isDataSet());
auto& ds = value->getDataSet();
auto status = makeDataSetIndex(ds);
if (UNLIKELY(!status.ok())) {
LOG(ERROR) << status;
clear();
return;
}
kind_ = Kind::kProp;
}
Status PropIter::makeDataSetIndex(const DataSet& ds) {
dsIndex_.ds = &ds;
auto& colNames = ds.colNames;
for (size_t i = 0; i < colNames.size(); ++i) {
dsIndex_.colIndices.emplace(colNames[i], i);
auto& colName = colNames[i];
if (colName.find(".") != std::string::npos) {
NG_RETURN_IF_ERROR(buildPropIndex(colName, i));
}
}
return Status::OK();
}
Status PropIter::buildPropIndex(const std::string& props, size_t columnId) {
std::vector<std::string> pieces;
folly::split(".", props, pieces);
if (UNLIKELY(pieces.size() != 2)) {
return Status::Error("Bad column name format: %s", props.c_str());
}
std::string name = pieces[0];
auto& propsMap = dsIndex_.propsMap;
if (propsMap.find(name) != propsMap.end()) {
propsMap[name].emplace(pieces[1], columnId);
} else {
std::unordered_map<std::string, size_t> propIndices;
propIndices.emplace(pieces[1], columnId);
propsMap.emplace(name, std::move(propIndices));
}
return Status::OK();
}
const Value& PropIter::getColumn(const std::string& col) const {
if (!valid()) {
return Value::kNullValue;
}
auto index = dsIndex_.colIndices.find(col);
if (index == dsIndex_.colIndices.end()) {
return Value::kNullValue;
}
auto& row = *iter_;
DCHECK_LT(index->second, row.values.size());
return row.values[index->second];
}
const Value& PropIter::getProp(const std::string& name, const std::string& prop) const {
if (!valid()) {
return Value::kNullValue;
}
auto& propsMap = dsIndex_.propsMap;
auto index = propsMap.find(name);
if (index == propsMap.end()) {
return Value::kEmpty;
}
auto propIndex = index->second.find(prop);
if (propIndex == index->second.end()) {
VLOG(1) << "No prop found : " << prop;
return Value::kNullValue;
}
auto colId = propIndex->second;
auto& row = *iter_;
DCHECK_GT(row.size(), colId);
return row[colId];
}
Value PropIter::getVertex() const {
if (!valid()) {
return Value::kNullValue;
}
auto vidVal = getColumn(nebula::kVid);
if (!SchemaUtil::isValidVid(vidVal)) {
return Value::kNullValue;
}
Vertex vertex;
vertex.vid = vidVal;
auto& tagPropsMap = dsIndex_.propsMap;
bool isVertexProps = true;
auto& row = *iter_;
// tagPropsMap -> <std::string, std::unordered_map<std::string, size_t> >
for (auto& tagProp : tagPropsMap) {
// propIndex -> std::unordered_map<std::string, size_t>
for (auto& propIndex : tagProp.second) {
if (row[propIndex.second].empty()) {
// Not current vertex's prop
isVertexProps = false;
break;
}
}
if (!isVertexProps) {
isVertexProps = true;
continue;
}
Tag tag;
tag.name = tagProp.first;
for (auto& propIndex : tagProp.second) {
if (propIndex.first == nebula::kTag) { // "_tag"
continue;
} else {
tag.props.emplace(propIndex.first, row[propIndex.second]);
}
}
vertex.tags.emplace_back(std::move(tag));
}
return Value(std::move(vertex));
}
Value PropIter::getEdge() const {
if (!valid()) {
return Value::kNullValue;
}
Edge edge;
auto& edgePropsMap = dsIndex_.propsMap;
bool isEdgeProps = true;
auto& row = *iter_;
for (auto& edgeProp : edgePropsMap) {
for (auto& propIndex : edgeProp.second) {
if (row[propIndex.second].empty()) {
// Not current edge's prop
isEdgeProps = false;
break;
}
}
if (!isEdgeProps) {
isEdgeProps = true;
continue;
}
auto edgeName = edgeProp.first;
edge.name = edgeProp.first;
auto type = getEdgeProp(edgeName, kType);
if (!type.isInt()) {
return Value::kNullBadType;
}
edge.type = type.getInt();
auto& srcVal = getEdgeProp(edgeName, kSrc);
if (!SchemaUtil::isValidVid(srcVal)) {
return Value::kNullBadType;
}
edge.src = srcVal;
auto& dstVal = getEdgeProp(edgeName, kDst);
if (!SchemaUtil::isValidVid(dstVal)) {
return Value::kNullBadType;
}
edge.dst = dstVal;
auto rank = getEdgeProp(edgeName, kRank);
if (!rank.isInt()) {
return Value::kNullBadType;
}
edge.ranking = rank.getInt();
for (auto& propIndex : edgeProp.second) {
if (propIndex.first == kSrc || propIndex.first == kDst || propIndex.first == kType ||
propIndex.first == kRank) {
continue;
}
edge.props.emplace(propIndex.first, row[propIndex.second]);
}
return Value(std::move(edge));
}
return Value::kNullValue;
}
List PropIter::getVertices() {
DCHECK(iter_ == rows_->begin());
List vertices;
vertices.values.reserve(size());
for (; valid(); next()) {
vertices.values.emplace_back(getVertex());
}
reset();
return vertices;
}
List PropIter::getEdges() {
DCHECK(iter_ == rows_->begin());
List edges;
edges.values.reserve(size());
for (; valid(); next()) {
auto edge = getEdge();
if (edge.isEdge()) {
const_cast<Edge&>(edge.getEdge()).format();
}
edges.values.emplace_back(std::move(edge));
}
reset();
return edges;
}
const Value& PropIter::getColumn(int32_t index) const { return getColumnByIndex(index, iter_); }
std::ostream& operator<<(std::ostream& os, Iterator::Kind kind) {
switch (kind) {
case Iterator::Kind::kDefault:
os << "default";
break;
case Iterator::Kind::kSequential:
os << "sequential";
break;
case Iterator::Kind::kGetNeighbors:
os << "get neighbors";
break;
case Iterator::Kind::kProp:
os << "Prop";
break;
}
os << " iterator";
return os;
}
} // namespace graph
} // namespace nebula
| 1 | 30,998 | Is that necessary? | vesoft-inc-nebula | cpp |
@@ -72,7 +72,7 @@ describe Ncr::WorkOrder do
describe "#ba_6x_tier1_team?" do
it "is true for whitelist of organizations" do
- org_letters = %w( 7 J 4 T 1 A C Z )
+ org_letters = %w( 1 2 4 7 A C J T Z )
org_letters.each do |org_letter|
org_code = "P11#{org_letter}XXXX"
ncr_org = build(:ncr_organization, code: org_code) | 1 | describe Ncr::WorkOrder do
include ProposalSpecHelper
it_behaves_like "client data"
describe "Associations" do
it { should belong_to(:ncr_organization) }
it { should belong_to(:approving_official) }
end
describe "Validations" do
it "does not allow approving official to be changed if the first step is not actionable" do
work_order = create(:ncr_work_order)
work_order.setup_approvals_and_observers
approving_official_step = work_order.reload.individual_steps.first
approving_official_step.update(status: "completed")
work_order.approving_official = create(:user, client_slug: "ncr")
expect(work_order).not_to be_valid
end
it "does allow approving official to be changed if the first step is actionable" do
work_order = create(:ncr_work_order)
work_order.setup_approvals_and_observers
work_order.approving_official = create(:user, client_slug: "ncr")
expect(work_order).to be_valid
end
end
describe "#as_indexed_json" do
it "serializes associations" do
whsc_org = create(:whsc_organization)
work_order = create(:ncr_work_order, ncr_organization: whsc_org)
indexable = work_order.as_json({include: [:ncr_organization, :approving_official]})
expect(work_order.as_indexed_json).to eq(indexable)
end
end
describe "#editable?" do
it "is true" do
work_order = build(:ncr_work_order)
expect(work_order).to be_editable
end
end
describe "#for_whsc_organization?" do
it "is true if the org code is for a whsc organization" do
organization = create(:whsc_organization)
work_order = build(:ncr_work_order, ncr_organization: organization)
expect(work_order).to be_for_whsc_organization
end
it "is false if org code is nil" do
work_order = build(:ncr_work_order, ncr_organization: nil)
expect(work_order).not_to be_for_whsc_organization
end
it "is false if org code is for a non-whsc org" do
organization = build(:ncr_organization)
work_order = build(:ncr_work_order, ncr_organization: organization)
expect(work_order).not_to be_for_whsc_organization
end
end
describe "#ba_6x_tier1_team?" do
it "is true for whitelist of organizations" do
org_letters = %w( 7 J 4 T 1 A C Z )
org_letters.each do |org_letter|
org_code = "P11#{org_letter}XXXX"
ncr_org = build(:ncr_organization, code: org_code)
work_order = build(:ncr_work_order, ncr_organization: ncr_org)
expect(work_order).to be_ba_6x_tier1_team
end
end
it "is false for non-listed organizations" do
ncr_org = build(:ncr_organization)
work_order = build(:ncr_work_order, ncr_organization: ncr_org)
expect(work_order).to_not be_ba_6x_tier1_team
end
end
describe "#for_ool_organization?" do
it "is true if org code is for an ool org" do
organization = create(:ool_organization)
work_order = build(:ncr_work_order, ncr_organization: organization)
expect(work_order).to be_for_ool_organization
end
it "is false if org code is nil" do
work_order = build(:ncr_work_order, ncr_organization: nil)
expect(work_order).not_to be_for_ool_organization
end
it "is false if org code is for non-ool org" do
organization = build(:ncr_organization)
work_order = build(:ncr_work_order, ncr_organization: organization)
expect(work_order).not_to be_for_ool_organization
end
end
describe "#total_price" do
it "gets price from amount field" do
work_order = build(:ncr_work_order, amount: 45.36)
expect(work_order.total_price).to eq(45.36)
end
end
describe "#pubic_identifier" do
it "prepends proposal ID with 'FY' and fiscal year" do
work_order = build(:ncr_work_order)
proposal = work_order.proposal
fiscal_year = work_order.fiscal_year.to_s.rjust(2, "0")
expect(work_order.public_identifier).to eq "FY#{fiscal_year}-#{proposal.id}"
end
end
describe '#fiscal_year' do
it 'ends the fiscal year on September 30th' do
work_order = create(:ncr_work_order, created_at: Date.new(2014, 9, 30))
expect(work_order.fiscal_year).to eq 14
end
it 'starts a new fiscal year on October first' do
work_order = create(:ncr_work_order, created_at: Date.new(2014, 10, 1))
expect(work_order.fiscal_year).to eq 15
end
end
describe 'validations' do
describe 'cl_number' do
let (:work_order) { build(:ncr_work_order) }
it "works with a 'CL' prefix" do
work_order.cl_number = 'CL1234567'
expect(work_order).to be_valid
end
it "requires seven numbers" do
work_order.cl_number = '123'
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:cl_number])
end
end
describe 'function_code' do
let (:work_order) { build(:ncr_work_order) }
it "works with 'PG' followed by three characters" do
work_order.function_code = 'PG123'
expect(work_order).to be_valid
end
it "must have five characters" do
work_order.function_code = 'PG12'
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:function_code])
end
end
describe 'RWA' do
let (:work_order) { build(:ncr_work_order, expense_type: 'BA80') }
it 'works with one letter followed by 7 numbers' do
work_order.rwa_number = 'A1234567'
expect(work_order).to be_valid
end
it 'must be 8 chars' do
work_order.rwa_number = 'A123456'
expect(work_order).not_to be_valid
end
it 'must have a letter at the beginning' do
work_order.rwa_number = '12345678'
expect(work_order).not_to be_valid
end
it "is required for BA80" do
work_order.rwa_number = nil
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:rwa_number])
end
it "is not required for BA61" do
work_order.expense_type = 'BA61'
work_order.rwa_number = nil
expect(work_order).to be_valid
work_order.rwa_number = ''
expect(work_order).to be_valid
end
end
describe "soc_code" do
let (:work_order) { build(:ncr_work_order) }
it "works with three characters" do
work_order.soc_code = "123"
expect(work_order).to be_valid
end
it "must be three characters" do
work_order.soc_code = "12"
expect(work_order).to_not be_valid
expect(work_order.errors.keys).to eq([:soc_code])
end
end
end
describe "#building_id" do
it "pulls out the building id when an identifier is present" do
wo = build(:ncr_work_order, building_number: "AB1234CD then some more")
expect(wo.building_id).to eq("AB1234CD")
end
it "defaults to the whole building number" do
wo = build(:ncr_work_order, building_number: "Another String")
expect(wo.building_id).to eq("Another String")
end
it "allows nil" do
wo = build(:ncr_work_order, building_number: nil)
expect(wo.building_id).to be_nil
end
it "does not require if expense_type is BA60" do
wo = build(:ncr_work_order, expense_type: "BA60", building_number: nil)
expect(wo).to be_valid
end
it "requires if expense_type is not BA60" do
wo = build(:ncr_work_order, expense_type: "BA61", building_number: nil)
expect(wo).to_not be_valid
end
end
describe "#final_approver" do
it "returns the final approver" do
wo = create(:ncr_work_order, :with_approvers)
expect(wo.final_approver).to eq(wo.approvers.last)
wo.individual_steps.first.complete!
expect(wo.final_approver).to eq(wo.approvers.last)
end
it "returns the last approver when fully completed" do
wo = create(:ncr_work_order, :with_approvers)
fully_complete(wo.proposal)
expect(wo.final_approver).to eq(wo.approvers.last)
end
end
describe '#restart_budget_approvals' do
it "sets the approvals to the proper state" do
work_order = create(:ncr_work_order)
proposal = work_order.proposal
work_order.setup_approvals_and_observers
fully_complete(proposal)
work_order.restart_budget_approvals
expect(work_order.status).to eq('pending')
expect(work_order.proposal.root_step.status).to eq('actionable')
expect(linear_approval_statuses(proposal)).to eq(%w(
completed
actionable
pending
))
end
end
describe "#budget_approvers" do
it "returns users assigned to budget approval steps" do
work_order = create(:ncr_work_order)
work_order.setup_approvals_and_observers
budget_mailbox_step = work_order.steps.last
user = budget_mailbox_step.user
expect(work_order.budget_approvers).to include(user)
end
it "returns users who completed budget approval steps" do
work_order = create(:ncr_work_order)
work_order.setup_approvals_and_observers
completer = create(:user)
budget_mailbox_step = work_order.steps.last
budget_mailbox_step.update(completer: completer)
expect(work_order.budget_approvers).to include(completer)
end
end
end
| 1 | 17,115 | yess so much easier to read in order like this! | 18F-C2 | rb |
@@ -613,6 +613,7 @@ public abstract class LuceneTestCase extends Assert {
RuleChain r = RuleChain.outerRule(new TestRuleIgnoreTestSuites())
.around(ignoreAfterMaxFailures)
.around(suiteFailureMarker = new TestRuleMarkFailure())
+ .around(new VerifyTestClassNamingConvention())
.around(new TestRuleAssertionsRequired())
.around(new TestRuleLimitSysouts(suiteFailureMarker))
.around(tempFilesCleanupRule = new TestRuleTemporaryFilesCleanup(suiteFailureMarker)); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.util;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.lang.StackWalker.StackFrame;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.net.URI;
import java.nio.file.FileSystem;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.Permission;
import java.security.PermissionCollection;
import java.security.Permissions;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.security.ProtectionDomain;
import java.security.SecurityPermission;
import java.text.Collator;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TimeZone;
import java.util.TreeSet;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.mockfile.FilterPath;
import org.apache.lucene.mockfile.VirusCheckingFS;
import org.apache.lucene.search.AssertingIndexSearcher;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FSLockFactory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.MergeInfo;
import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.store.RawDirectoryWrapper;
import org.apache.lucene.util.automaton.AutomatonTestUtil;
import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.RegExp;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import org.junit.runner.RunWith;
import org.junit.internal.AssumptionViolatedException;
import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
import com.carrotsearch.randomizedtesting.LifecycleScope;
import com.carrotsearch.randomizedtesting.MixWithSuiteName;
import com.carrotsearch.randomizedtesting.RandomizedContext;
import com.carrotsearch.randomizedtesting.RandomizedRunner;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.annotations.Listeners;
import com.carrotsearch.randomizedtesting.annotations.SeedDecorators;
import com.carrotsearch.randomizedtesting.annotations.TestGroup;
import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule;
import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule;
import junit.framework.AssertionFailedError;
import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsInt;
import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
/**
* Base class for all Lucene unit tests, Junit3 or Junit4 variant.
*
* <h2>Class and instance setup.</h2>
*
* <p>
* The preferred way to specify class (suite-level) setup/cleanup is to use
* static methods annotated with {@link BeforeClass} and {@link AfterClass}. Any
* code in these methods is executed within the test framework's control and
 * ensures proper setup has been made. <b>Try not to use static initializers
* (including complex final field initializers).</b> Static initializers are
* executed before any setup rules are fired and may cause you (or somebody
* else) headaches.
*
* <p>
* For instance-level setup, use {@link Before} and {@link After} annotated
* methods. If you override either {@link #setUp()} or {@link #tearDown()} in
* your subclass, make sure you call <code>super.setUp()</code> and
* <code>super.tearDown()</code>. This is detected and enforced.
*
* <h2>Specifying test cases</h2>
*
* <p>
* Any test method with a <code>testXXX</code> prefix is considered a test case.
* Any test method annotated with {@link Test} is considered a test case.
*
* <h2>Randomized execution and test facilities</h2>
*
* <p>
* {@link LuceneTestCase} uses {@link RandomizedRunner} to execute test cases.
* {@link RandomizedRunner} has built-in support for tests randomization
* including access to a repeatable {@link Random} instance. See
* {@link #random()} method. Any test using {@link Random} acquired from
* {@link #random()} should be fully reproducible (assuming no race conditions
* between threads etc.). The initial seed for a test case is reported in many
* ways:
* <ul>
* <li>as part of any exception thrown from its body (inserted as a dummy stack
* trace entry),</li>
* <li>as part of the main thread executing the test case (if your test hangs,
* just dump the stack trace of all threads and you'll see the seed),</li>
* <li>the master seed can also be accessed manually by getting the current
* context ({@link RandomizedContext#current()}) and then calling
* {@link RandomizedContext#getRunnerSeedAsString()}.</li>
* </ul>
*/
@RunWith(RandomizedRunner.class)
@TestMethodProviders({
LuceneJUnit3MethodProvider.class,
JUnit4MethodProvider.class
})
@Listeners({
RunListenerPrintReproduceInfo.class,
FailureMarker.class
})
@SeedDecorators({MixWithSuiteName.class}) // See LUCENE-3995 for rationale.
@ThreadLeakScope(Scope.SUITE)
@ThreadLeakGroup(Group.MAIN)
@ThreadLeakAction({Action.WARN, Action.INTERRUPT})
@ThreadLeakLingering(linger = 20000) // Wait long for leaked threads to complete before failure. zk needs this.
@ThreadLeakZombies(Consequence.IGNORE_REMAINING_TESTS)
@TimeoutSuite(millis = 2 * TimeUnits.HOUR)
@ThreadLeakFilters(defaultFilters = true, filters = {
QuickPatchThreadsFilter.class
})
@TestRuleLimitSysouts.Limit(
bytes = TestRuleLimitSysouts.DEFAULT_LIMIT,
hardLimit = TestRuleLimitSysouts.DEFAULT_HARD_LIMIT)
public abstract class LuceneTestCase extends Assert {
// --------------------------------------------------------------------
// Test groups, system properties and other annotations modifying tests
// --------------------------------------------------------------------
public static final String SYSPROP_NIGHTLY = "tests.nightly";
public static final String SYSPROP_WEEKLY = "tests.weekly";
public static final String SYSPROP_MONSTER = "tests.monster";
public static final String SYSPROP_AWAITSFIX = "tests.awaitsfix";
public static final String SYSPROP_SLOW = "tests.slow";
public static final String SYSPROP_BADAPPLES = "tests.badapples";
/** @see #ignoreAfterMaxFailures*/
public static final String SYSPROP_MAXFAILURES = "tests.maxfailures";
/** @see #ignoreAfterMaxFailures*/
public static final String SYSPROP_FAILFAST = "tests.failfast";
/**
* Annotation for tests that should only be run during nightly builds.
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@TestGroup(enabled = false, sysProperty = SYSPROP_NIGHTLY)
public @interface Nightly {}
/**
* Annotation for tests that should only be run during weekly builds
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@TestGroup(enabled = false, sysProperty = SYSPROP_WEEKLY)
public @interface Weekly {}
/**
* Annotation for monster tests that require special setup (e.g. use tons of disk and RAM)
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@TestGroup(enabled = false, sysProperty = SYSPROP_MONSTER)
public @interface Monster {
String value();
}
/**
* Annotation for tests which exhibit a known issue and are temporarily disabled.
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@TestGroup(enabled = false, sysProperty = SYSPROP_AWAITSFIX)
public @interface AwaitsFix {
/** Point to JIRA entry. */
public String bugUrl();
}
/**
* Annotation for tests that are slow. Slow tests do run by default but can be
* disabled if a quick run is needed.
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@TestGroup(enabled = true, sysProperty = SYSPROP_SLOW)
public @interface Slow {}
/**
* Annotation for tests that fail frequently and are not executed in Jenkins builds
* to not spam mailing lists with false reports.
*
* Tests are turned on for developers by default. If you want to disable
* them, set:
* <pre>
* -Dtests.badapples=false
* </pre>
* (or do this through {@code ~./lucene.build.properties}).
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@TestGroup(enabled = true, sysProperty = SYSPROP_BADAPPLES)
public @interface BadApple {
/** Point to JIRA entry. */
public String bugUrl();
}
/**
* Annotation for test classes that should avoid certain codec types
* (because they are expensive, for example).
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface SuppressCodecs {
String[] value();
}
/**
* Annotation for test classes that should avoid mock filesystem types
* (because they test a bug that only happens on linux, for example).
* <p>
* You can avoid specific names {@link Class#getSimpleName()} or use
* the special value {@code *} to disable all mock filesystems.
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface SuppressFileSystems {
String[] value();
}
/**
 * Annotation for test classes that should always omit
 * actual fsync calls, preventing them from reaching the filesystem.
* <p>
* This can be useful, e.g. if they make many lucene commits.
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface SuppressFsync {}
/**
* Marks any suites which are known not to close all the temporary
* files. This may prevent temp. files and folders from being cleaned
* up after the suite is completed.
*
* @see LuceneTestCase#createTempDir()
* @see LuceneTestCase#createTempFile(String, String)
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface SuppressTempFileChecks {
/** Point to JIRA entry. */
public String bugUrl() default "None";
}
/**
* Ignore {@link TestRuleLimitSysouts} for any suite which is known to print
* over the default limit of bytes to {@link System#out} or {@link System#err}.
*
* @see TestRuleLimitSysouts
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface SuppressSysoutChecks {
/** Point to JIRA entry. */
public String bugUrl();
}
/**
* Suppress the default {@code reproduce with: ant test...}
* Your own listener can be added as needed for your build.
*/
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface SuppressReproduceLine {}
// -----------------------------------------------------------------
// Truly immutable fields and constants, initialized once and valid
// for all suites ever since.
// -----------------------------------------------------------------
/**
* True if and only if tests are run in verbose mode. If this flag is false
* tests are not expected to print any messages. Enforced with {@link TestRuleLimitSysouts}.
*/
public static final boolean VERBOSE = systemPropertyAsBoolean("tests.verbose", false);
/**
* Enables or disables dumping of {@link InfoStream} messages.
*/
public static final boolean INFOSTREAM = systemPropertyAsBoolean("tests.infostream", VERBOSE);
/**
* A random multiplier which you should use when writing random tests:
* multiply it by the number of iterations to scale your tests (for nightly builds).
*/
public static final int RANDOM_MULTIPLIER = systemPropertyAsInt("tests.multiplier", 1);
public static final boolean TEST_ASSERTS_ENABLED = systemPropertyAsBoolean("tests.asserts", true);
/** TODO: javadoc? */
public static final String DEFAULT_LINE_DOCS_FILE = "europarl.lines.txt.gz";
/** TODO: javadoc? */
public static final String JENKINS_LARGE_LINE_DOCS_FILE = "enwiki.random.lines.txt";
/** Gets the codec to run tests with. */
public static final String TEST_CODEC = System.getProperty("tests.codec", "random");
/** Gets the postingsFormat to run tests with. */
public static final String TEST_POSTINGSFORMAT = System.getProperty("tests.postingsformat", "random");
/** Gets the docValuesFormat to run tests with */
public static final String TEST_DOCVALUESFORMAT = System.getProperty("tests.docvaluesformat", "random");
/** Gets the directory to run tests with */
public static final String TEST_DIRECTORY = System.getProperty("tests.directory", "random");
/** the line file used by LineFileDocs */
public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", DEFAULT_LINE_DOCS_FILE);
/** Whether or not {@link Nightly} tests should run. */
public static final boolean TEST_NIGHTLY = systemPropertyAsBoolean(SYSPROP_NIGHTLY, Nightly.class.getAnnotation(TestGroup.class).enabled());
/** Whether or not {@link Weekly} tests should run. */
public static final boolean TEST_WEEKLY = systemPropertyAsBoolean(SYSPROP_WEEKLY, Weekly.class.getAnnotation(TestGroup.class).enabled());
/** Whether or not {@link Monster} tests should run. */
public static final boolean TEST_MONSTER = systemPropertyAsBoolean(SYSPROP_MONSTER, Monster.class.getAnnotation(TestGroup.class).enabled());
/** Whether or not {@link AwaitsFix} tests should run. */
public static final boolean TEST_AWAITSFIX = systemPropertyAsBoolean(SYSPROP_AWAITSFIX, AwaitsFix.class.getAnnotation(TestGroup.class).enabled());
/** Whether or not {@link BadApple} tests should run. */
public static final boolean TEST_BADAPPLES = systemPropertyAsBoolean(SYSPROP_BADAPPLES, BadApple.class.getAnnotation(TestGroup.class).enabled());
/** Whether or not {@link Slow} tests should run. */
public static final boolean TEST_SLOW = systemPropertyAsBoolean(SYSPROP_SLOW, Slow.class.getAnnotation(TestGroup.class).enabled());
/** Throttling, see {@link MockDirectoryWrapper#setThrottling(Throttling)}. */
public static final Throttling TEST_THROTTLING = TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER;
/** Leave temporary files on disk, even on successful runs. */
public static final boolean LEAVE_TEMPORARY;
static {
boolean defaultValue = false;
for (String property : Arrays.asList(
"tests.leaveTemporary" /* ANT tasks's (junit4) flag. */,
"tests.leavetemporary" /* lowercase */,
"tests.leavetmpdir" /* default */,
"solr.test.leavetmpdir" /* Solr's legacy */)) {
defaultValue |= systemPropertyAsBoolean(property, false);
}
LEAVE_TEMPORARY = defaultValue;
}
  /** Returns true if MMapDirectory supports unmapping on this platform (required for Windows), or if we are not on Windows. */
public static boolean hasWorkingMMapOnWindows() {
return !Constants.WINDOWS || MMapDirectory.UNMAP_SUPPORTED;
}
/** Assumes that the current MMapDirectory implementation supports unmapping, so the test will not fail on Windows.
* @see #hasWorkingMMapOnWindows()
* */
public static void assumeWorkingMMapOnWindows() {
assumeTrue(MMapDirectory.UNMAP_NOT_SUPPORTED_REASON, hasWorkingMMapOnWindows());
}
/** Filesystem-based {@link Directory} implementations. */
private static final List<String> FS_DIRECTORIES = Arrays.asList(
"NIOFSDirectory",
// NIOFSDirectory as replacement for MMapDirectory if unmapping is not supported on Windows (to make randomization stable):
hasWorkingMMapOnWindows() ? "MMapDirectory" : "NIOFSDirectory"
);
/** All {@link Directory} implementations. */
private static final List<String> CORE_DIRECTORIES;
static {
CORE_DIRECTORIES = new ArrayList<>(FS_DIRECTORIES);
CORE_DIRECTORIES.add(ByteBuffersDirectory.class.getSimpleName());
}
/** A {@link org.apache.lucene.search.QueryCachingPolicy} that randomly caches. */
public static final QueryCachingPolicy MAYBE_CACHE_POLICY = new QueryCachingPolicy() {
@Override
public void onUse(Query query) {}
@Override
public boolean shouldCache(Query query) throws IOException {
return random().nextBoolean();
}
};
// -----------------------------------------------------------------
// Fields initialized in class or instance rules.
// -----------------------------------------------------------------
// -----------------------------------------------------------------
// Class level (suite) rules.
// -----------------------------------------------------------------
/**
* Stores the currently class under test.
*/
private static final TestRuleStoreClassName classNameRule;
/**
* Class environment setup rule.
*/
static final TestRuleSetupAndRestoreClassEnv classEnvRule;
/**
* Suite failure marker (any error in the test or suite scope).
*/
protected static TestRuleMarkFailure suiteFailureMarker;
/**
* Temporary files cleanup rule.
*/
private static TestRuleTemporaryFilesCleanup tempFilesCleanupRule;
/**
* Ignore tests after hitting a designated number of initial failures. This
* is truly a "static" global singleton since it needs to span the lifetime of all
* test classes running inside this JVM (it cannot be part of a class rule).
*
* <p>This poses some problems for the test framework's tests because these sometimes
* trigger intentional failures which add up to the global count. This field contains
* a (possibly) changing reference to {@link TestRuleIgnoreAfterMaxFailures} and we
* dispatch to its current value from the {@link #classRules} chain using {@link TestRuleDelegate}.
*/
private static final AtomicReference<TestRuleIgnoreAfterMaxFailures> ignoreAfterMaxFailuresDelegate;
private static final TestRule ignoreAfterMaxFailures;
static {
int maxFailures = systemPropertyAsInt(SYSPROP_MAXFAILURES, Integer.MAX_VALUE);
boolean failFast = systemPropertyAsBoolean(SYSPROP_FAILFAST, false);
if (failFast) {
if (maxFailures == Integer.MAX_VALUE) {
maxFailures = 1;
} else {
Logger.getLogger(LuceneTestCase.class.getSimpleName()).warning(
"Property '" + SYSPROP_MAXFAILURES + "'=" + maxFailures + ", 'failfast' is" +
" ignored.");
}
}
ignoreAfterMaxFailuresDelegate =
new AtomicReference<>(
new TestRuleIgnoreAfterMaxFailures(maxFailures));
ignoreAfterMaxFailures = TestRuleDelegate.of(ignoreAfterMaxFailuresDelegate);
}
/**
* Try to capture streams early so that other classes don't have a chance to steal references
* to them (as is the case with ju.logging handlers).
*/
static {
TestRuleLimitSysouts.checkCaptureStreams();
Logger.getGlobal().getHandlers();
}
/**
* Temporarily substitute the global {@link TestRuleIgnoreAfterMaxFailures}. See
* {@link #ignoreAfterMaxFailuresDelegate} for some explanation why this method
* is needed.
*/
public static TestRuleIgnoreAfterMaxFailures replaceMaxFailureRule(TestRuleIgnoreAfterMaxFailures newValue) {
return ignoreAfterMaxFailuresDelegate.getAndSet(newValue);
}
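  // --- Illustrative sketch, not part of the original file ---------------------------------
  // Shows the swap-and-restore pattern around replaceMaxFailureRule() that the javadoc above
  // alludes to for test-framework self-tests which trigger intentional failures. The method
  // name and the body of the try block are hypothetical.
  @SuppressWarnings("unused")
  private static void exampleReplaceMaxFailureRuleUsage() {
    TestRuleIgnoreAfterMaxFailures previous =
        replaceMaxFailureRule(new TestRuleIgnoreAfterMaxFailures(Integer.MAX_VALUE));
    try {
      // ... run nested suites that are expected to fail on purpose ...
    } finally {
      replaceMaxFailureRule(previous); // always restore the global delegate
    }
  }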
/**
* Max 10mb of static data stored in a test suite class after the suite is complete.
* Prevents static data structures leaking and causing OOMs in subsequent tests.
*/
private final static long STATIC_LEAK_THRESHOLD = 10 * 1024 * 1024;
/** By-name list of ignored types like loggers etc. */
private final static Set<String> STATIC_LEAK_IGNORED_TYPES = Set.of(
"org.slf4j.Logger",
"org.apache.solr.SolrLogFormatter",
"java.io.File", // Solr sometimes refers to this in a static way, but it has a "java.nio.fs.Path" inside
Path.class.getName(), // causes problems because interface is implemented by hidden classes
Class.class.getName(),
EnumSet.class.getName());
/**
* This controls how suite-level rules are nested. It is important that _all_ rules declared
* in {@link LuceneTestCase} are executed in proper order if they depend on each
* other.
*/
@ClassRule
public static TestRule classRules;
static {
RuleChain r = RuleChain.outerRule(new TestRuleIgnoreTestSuites())
.around(ignoreAfterMaxFailures)
.around(suiteFailureMarker = new TestRuleMarkFailure())
.around(new TestRuleAssertionsRequired())
.around(new TestRuleLimitSysouts(suiteFailureMarker))
.around(tempFilesCleanupRule = new TestRuleTemporaryFilesCleanup(suiteFailureMarker));
// TODO LUCENE-7595: Java 9 does not allow to look into runtime classes, so we have to fix the RAM usage checker!
if (!Constants.JRE_IS_MINIMUM_JAVA9) {
r = r.around(new StaticFieldsInvariantRule(STATIC_LEAK_THRESHOLD, true) {
@Override
protected boolean accept(java.lang.reflect.Field field) {
// Don't count known classes that consume memory once.
if (STATIC_LEAK_IGNORED_TYPES.contains(field.getType().getName())) {
return false;
}
// Don't count references from ourselves, we're top-level.
if (field.getDeclaringClass() == LuceneTestCase.class) {
return false;
}
return super.accept(field);
}
});
}
classRules = r.around(new NoClassHooksShadowingRule())
.around(new NoInstanceHooksOverridesRule() {
@Override
protected boolean verify(Method key) {
String name = key.getName();
return !(name.equals("setUp") || name.equals("tearDown"));
}
})
.around(classNameRule = new TestRuleStoreClassName())
.around(new TestRuleRestoreSystemProperties(
// Enlist all properties to which we have write access (security manager);
// these should be restored to previous state, no matter what the outcome of the test.
// We reset the default locale and timezone; these properties change as a side-effect
"user.language",
"user.timezone",
// TODO: these should, ideally, be moved to Solr's base class.
"solr.directoryFactory",
"solr.solr.home",
"solr.data.dir"
))
.around(classEnvRule = new TestRuleSetupAndRestoreClassEnv());
}
// -----------------------------------------------------------------
// Test level rules.
// -----------------------------------------------------------------
/** Enforces {@link #setUp()} and {@link #tearDown()} calls are chained. */
private TestRuleSetupTeardownChained parentChainCallRule = new TestRuleSetupTeardownChained();
/** Save test thread and name. */
private TestRuleThreadAndTestName threadAndTestNameRule = new TestRuleThreadAndTestName();
/** Taint suite result with individual test failures. */
private TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker);
/**
* This controls how individual test rules are nested. It is important that
* _all_ rules declared in {@link LuceneTestCase} are executed in proper order
* if they depend on each other.
*/
@Rule
public final TestRule ruleChain = RuleChain
.outerRule(testFailureMarker)
.around(ignoreAfterMaxFailures)
.around(threadAndTestNameRule)
.around(new TestRuleSetupAndRestoreInstanceEnv())
.around(parentChainCallRule);
private static final Map<String,FieldType> fieldToType = new HashMap<String,FieldType>();
enum LiveIWCFlushMode {BY_RAM, BY_DOCS, EITHER};
/** Set by TestRuleSetupAndRestoreClassEnv */
static LiveIWCFlushMode liveIWCFlushMode;
static void setLiveIWCFlushMode(LiveIWCFlushMode flushMode) {
liveIWCFlushMode = flushMode;
}
// -----------------------------------------------------------------
// Suite and test case setup/ cleanup.
// -----------------------------------------------------------------
/**
* For subclasses to override. Overrides must call {@code super.setUp()}.
*/
@Before
public void setUp() throws Exception {
parentChainCallRule.setupCalled = true;
}
/**
* For subclasses to override. Overrides must call {@code super.tearDown()}.
*/
@After
public void tearDown() throws Exception {
parentChainCallRule.teardownCalled = true;
fieldToType.clear();
// Test is supposed to call this itself, but we do this defensively in case it forgot:
restoreIndexWriterMaxDocs();
}
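  // --- Illustrative sketch, not part of the original file ---------------------------------
  // A minimal subclass showing the documented convention that setUp()/tearDown() overrides
  // must chain to super (enforced by TestRuleSetupTeardownChained). The class name and the
  // exampleDir field are hypothetical.
  public static abstract class ExampleChainedSetupTestCase extends LuceneTestCase {
    protected Directory exampleDir;

    @Override
    public void setUp() throws Exception {
      super.setUp();               // required; omitting it trips the rule chain
      exampleDir = newDirectory(); // random, tracked Directory from LuceneTestCase
    }

    @Override
    public void tearDown() throws Exception {
      exampleDir.close();
      super.tearDown();            // required; omitting it trips the rule chain
    }
  }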
/** Tells {@link IndexWriter} to enforce the specified limit as the maximum number of documents in one index; call
* {@link #restoreIndexWriterMaxDocs} once your test is done. */
public void setIndexWriterMaxDocs(int limit) {
IndexWriterMaxDocsChanger.setMaxDocs(limit);
}
/** Returns to the default {@link IndexWriter#MAX_DOCS} limit. */
public void restoreIndexWriterMaxDocs() {
IndexWriterMaxDocsChanger.restoreMaxDocs();
}
// -----------------------------------------------------------------
// Test facilities and facades for subclasses.
// -----------------------------------------------------------------
/**
* Access to the current {@link RandomizedContext}'s Random instance. It is safe to use
* this method from multiple threads, etc., but it should be called while within a runner's
* scope (so no static initializers). The returned {@link Random} instance will be
* <b>different</b> when this method is called inside a {@link BeforeClass} hook (static
* suite scope) and within {@link Before}/ {@link After} hooks or test methods.
*
* <p>The returned instance must not be shared with other threads or cross a single scope's
* boundary. For example, a {@link Random} acquired within a test method shouldn't be reused
* for another test case.
*
* <p>There is an overhead connected with getting the {@link Random} for a particular context
* and thread. It is better to cache the {@link Random} locally if tight loops with multiple
* invocations are present or create a derivative local {@link Random} for millions of calls
* like this:
* <pre>
* Random random = new Random(random().nextLong());
* // tight loop with many invocations.
* </pre>
*/
public static Random random() {
return RandomizedContext.current().getRandom();
}
/**
* Registers a {@link Closeable} resource that should be closed after the test
* completes.
*
* @return <code>resource</code> (for call chaining).
*/
public <T extends Closeable> T closeAfterTest(T resource) {
return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.TEST);
}
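  // --- Illustrative sketch, not part of the original file ---------------------------------
  // Typical use of closeAfterTest(): wrap resource creation so the resource is closed when the
  // test scope ends, and keep using the returned reference inline. The method name is
  // hypothetical.
  @SuppressWarnings("unused")
  private void exampleCloseAfterTestUsage() {
    Directory dir = closeAfterTest(newFSDirectory(createTempDir("exampleCloseAfterTest")));
    assertNotNull(dir); // the same instance is returned, so it can be chained
  }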
/**
* Registers a {@link Closeable} resource that should be closed after the suite
* completes.
*
* @return <code>resource</code> (for call chaining).
*/
public static <T extends Closeable> T closeAfterSuite(T resource) {
return RandomizedContext.current().closeAtEnd(resource, LifecycleScope.SUITE);
}
/**
* Return the current class being tested.
*/
public static Class<?> getTestClass() {
return classNameRule.getTestClass();
}
/**
* Return the name of the currently executing test case.
*/
public String getTestName() {
return threadAndTestNameRule.testMethodName;
}
/**
* Some tests expect the directory to contain a single segment, and want to
   * do tests on that segment's reader. This is a utility method to help them.
*/
/*
public static SegmentReader getOnlySegmentReader(DirectoryReader reader) {
List<LeafReaderContext> subReaders = reader.leaves();
if (subReaders.size() != 1) {
throw new IllegalArgumentException(reader + " has " + subReaders.size() + " segments instead of exactly one");
}
final LeafReader r = subReaders.get(0).reader();
assertTrue("expected a SegmentReader but got " + r, r instanceof SegmentReader);
return (SegmentReader) r;
}
*/
/**
* Some tests expect the directory to contain a single segment, and want to
   * do tests on that segment's reader. This is a utility method to help them.
*/
public static LeafReader getOnlyLeafReader(IndexReader reader) {
List<LeafReaderContext> subReaders = reader.leaves();
if (subReaders.size() != 1) {
throw new IllegalArgumentException(reader + " has " + subReaders.size() + " segments instead of exactly one");
}
return subReaders.get(0).reader();
}
/**
* Returns true if and only if the calling thread is the primary thread
* executing the test case.
*/
protected boolean isTestThread() {
assertNotNull("Test case thread not set?", threadAndTestNameRule.testCaseThread);
return Thread.currentThread() == threadAndTestNameRule.testCaseThread;
}
/**
   * Returns a number that is at least <code>i</code>.
* <p>
* The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
* is active and {@link #RANDOM_MULTIPLIER}, but also with some random fudge.
*/
public static int atLeast(Random random, int i) {
int min = (TEST_NIGHTLY ? 2*i : i) * RANDOM_MULTIPLIER;
int max = min+(min/2);
return TestUtil.nextInt(random, min, max);
}
public static int atLeast(int i) {
return atLeast(random(), i);
}
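  // --- Illustrative sketch, not part of the original file ---------------------------------
  // Shows the usual pattern for scaling a randomized loop with atLeast(), so nightly runs and
  // tests.multiplier increase the iteration count. The method name is hypothetical.
  @SuppressWarnings("unused")
  private static void exampleAtLeastUsage() {
    int iters = atLeast(100); // at least 100 iterations; scaled up for nightly / multiplier
    for (int i = 0; i < iters; i++) {
      int value = TestUtil.nextInt(random(), 0, 10); // reproducible via the test seed
      assertTrue(value >= 0 && value <= 10);
    }
  }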
/**
   * Returns true if something should happen rarely.
* <p>
* The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
* is active and {@link #RANDOM_MULTIPLIER}.
*/
public static boolean rarely(Random random) {
int p = TEST_NIGHTLY ? 10 : 1;
p += (p * Math.log(RANDOM_MULTIPLIER));
int min = 100 - Math.min(p, 50); // never more than 50
return random.nextInt(100) >= min;
}
public static boolean rarely() {
return rarely(random());
}
public static boolean usually(Random random) {
return !rarely(random);
}
public static boolean usually() {
return usually(random());
}
public static void assumeTrue(String msg, boolean condition) {
RandomizedTest.assumeTrue(msg, condition);
}
public static void assumeFalse(String msg, boolean condition) {
RandomizedTest.assumeFalse(msg, condition);
}
public static void assumeNoException(String msg, Exception e) {
RandomizedTest.assumeNoException(msg, e);
}
/**
* Return <code>args</code> as a {@link Set} instance. The order of elements is not
* preserved in iterators.
*/
@SafeVarargs @SuppressWarnings("varargs")
public static <T> Set<T> asSet(T... args) {
return new HashSet<>(Arrays.asList(args));
}
/**
* Convenience method for logging an iterator.
*
* @param label String logged before/after the items in the iterator
   * @param iter Each next() is toString()ed and logged on its own line. If iter is null this is logged differently than an empty iterator.
* @param stream Stream to log messages to.
*/
public static void dumpIterator(String label, Iterator<?> iter,
PrintStream stream) {
stream.println("*** BEGIN " + label + " ***");
if (null == iter) {
stream.println(" ... NULL ...");
} else {
while (iter.hasNext()) {
stream.println(iter.next().toString());
}
}
stream.println("*** END " + label + " ***");
}
/**
* Convenience method for logging an array. Wraps the array in an iterator and delegates
*
* @see #dumpIterator(String,Iterator,PrintStream)
*/
public static void dumpArray(String label, Object[] objs,
PrintStream stream) {
Iterator<?> iter = (null == objs) ? null : Arrays.asList(objs).iterator();
dumpIterator(label, iter, stream);
}
/** create a new index writer config with random defaults */
public static IndexWriterConfig newIndexWriterConfig() {
return newIndexWriterConfig(new MockAnalyzer(random()));
}
/** create a new index writer config with random defaults */
public static IndexWriterConfig newIndexWriterConfig(Analyzer a) {
return newIndexWriterConfig(random(), a);
}
/** create a new index writer config with random defaults using the specified random */
public static IndexWriterConfig newIndexWriterConfig(Random r, Analyzer a) {
IndexWriterConfig c = new IndexWriterConfig(a);
c.setSimilarity(classEnvRule.similarity);
if (VERBOSE) {
// Even though TestRuleSetupAndRestoreClassEnv calls
// InfoStream.setDefault, we do it again here so that
// the PrintStreamInfoStream.messageID increments so
// that when there are separate instances of
// IndexWriter created we see "IW 0", "IW 1", "IW 2",
// ... instead of just always "IW 0":
c.setInfoStream(new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(System.out));
}
if (rarely(r)) {
c.setMergeScheduler(new SerialMergeScheduler());
} else if (rarely(r)) {
ConcurrentMergeScheduler cms;
if (r.nextBoolean()) {
cms = new ConcurrentMergeScheduler();
} else {
cms = new ConcurrentMergeScheduler() {
@Override
protected synchronized boolean maybeStall(MergeSource mergeSource) {
return true;
}
};
}
int maxThreadCount = TestUtil.nextInt(r, 1, 4);
int maxMergeCount = TestUtil.nextInt(r, maxThreadCount, maxThreadCount + 4);
cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
if (random().nextBoolean()) {
cms.disableAutoIOThrottle();
assertFalse(cms.getAutoIOThrottle());
}
cms.setForceMergeMBPerSec(10 + 10*random().nextDouble());
c.setMergeScheduler(cms);
} else {
// Always use consistent settings, else CMS's dynamic (SSD or not)
// defaults can change, hurting reproducibility:
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// Only 1 thread can run at once (should maybe help reproducibility),
// with up to 3 pending merges before segment-producing threads are
// stalled:
cms.setMaxMergesAndThreads(3, 1);
c.setMergeScheduler(cms);
}
if (r.nextBoolean()) {
if (rarely(r)) {
// crazy value
c.setMaxBufferedDocs(TestUtil.nextInt(r, 2, 15));
} else {
// reasonable value
c.setMaxBufferedDocs(TestUtil.nextInt(r, 16, 1000));
}
}
c.setMergePolicy(newMergePolicy(r));
avoidPathologicalMerging(c);
if (rarely(r)) {
c.setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.getInfoStream()));
}
c.setUseCompoundFile(r.nextBoolean());
c.setReaderPooling(r.nextBoolean());
if (rarely(r)) {
c.setCheckPendingFlushUpdate(false);
}
c.setMaxCommitMergeWaitMillis(rarely() ? atLeast(r, 1000) : atLeast(r, 200));
return c;
}
private static void avoidPathologicalMerging(IndexWriterConfig iwc) {
// Don't allow "tiny" flushed segments with "big" merge
// floor: this leads to pathological O(N^2) merge costs:
long estFlushSizeBytes = Long.MAX_VALUE;
if (iwc.getMaxBufferedDocs() != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
// Gross estimation of 1 KB segment bytes for each doc indexed:
estFlushSizeBytes = Math.min(estFlushSizeBytes, iwc.getMaxBufferedDocs() * 1024);
}
if (iwc.getRAMBufferSizeMB() != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
estFlushSizeBytes = Math.min(estFlushSizeBytes, (long) (iwc.getRAMBufferSizeMB() * 1024 * 1024));
}
assert estFlushSizeBytes > 0;
MergePolicy mp = iwc.getMergePolicy();
if (mp instanceof TieredMergePolicy) {
TieredMergePolicy tmp = (TieredMergePolicy) mp;
long floorSegBytes = (long) (tmp.getFloorSegmentMB() * 1024 * 1024);
if (floorSegBytes / estFlushSizeBytes > 10) {
double newValue = estFlushSizeBytes * 10.0 / 1024 / 1024;
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase: changing TieredMergePolicy.floorSegmentMB from " + tmp.getFloorSegmentMB() + " to " + newValue + " to avoid pathological merging");
}
tmp.setFloorSegmentMB(newValue);
}
} else if (mp instanceof LogByteSizeMergePolicy) {
LogByteSizeMergePolicy lmp = (LogByteSizeMergePolicy) mp;
if ((lmp.getMinMergeMB()*1024*1024) / estFlushSizeBytes > 10) {
double newValue = estFlushSizeBytes * 10.0 / 1024 / 1024;
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase: changing LogByteSizeMergePolicy.minMergeMB from " + lmp.getMinMergeMB() + " to " + newValue + " to avoid pathological merging");
}
lmp.setMinMergeMB(newValue);
}
} else if (mp instanceof LogDocMergePolicy) {
LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
assert estFlushSizeBytes / 1024 < Integer.MAX_VALUE/10;
int estFlushDocs = Math.max(1, (int) (estFlushSizeBytes / 1024));
if (lmp.getMinMergeDocs() / estFlushDocs > 10) {
int newValue = estFlushDocs * 10;
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase: changing LogDocMergePolicy.minMergeDocs from " + lmp.getMinMergeDocs() + " to " + newValue + " to avoid pathological merging");
}
lmp.setMinMergeDocs(newValue);
}
}
}
public static MergePolicy newMergePolicy(Random r) {
return newMergePolicy(r, true);
}
public static MergePolicy newMergePolicy(Random r, boolean includeMockMP) {
if (includeMockMP && rarely(r)) {
return new MockRandomMergePolicy(r);
} else if (r.nextBoolean()) {
return newTieredMergePolicy(r);
} else if (rarely(r) ) {
return newAlcoholicMergePolicy(r, classEnvRule.timeZone);
}
return newLogMergePolicy(r);
}
public static MergePolicy newMergePolicy() {
return newMergePolicy(random());
}
public static LogMergePolicy newLogMergePolicy() {
return newLogMergePolicy(random());
}
public static TieredMergePolicy newTieredMergePolicy() {
return newTieredMergePolicy(random());
}
public static AlcoholicMergePolicy newAlcoholicMergePolicy() {
return newAlcoholicMergePolicy(random(), classEnvRule.timeZone);
}
public static AlcoholicMergePolicy newAlcoholicMergePolicy(Random r, TimeZone tz) {
return new AlcoholicMergePolicy(tz, new Random(r.nextLong()));
}
public static LogMergePolicy newLogMergePolicy(Random r) {
LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
logmp.setCalibrateSizeByDeletes(r.nextBoolean());
if (rarely(r)) {
logmp.setMergeFactor(TestUtil.nextInt(r, 2, 9));
} else {
logmp.setMergeFactor(TestUtil.nextInt(r, 10, 50));
}
configureRandom(r, logmp);
return logmp;
}
private static void configureRandom(Random r, MergePolicy mergePolicy) {
if (r.nextBoolean()) {
mergePolicy.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
} else {
mergePolicy.setNoCFSRatio(r.nextBoolean() ? 1.0 : 0.0);
}
if (rarely(r)) {
mergePolicy.setMaxCFSSegmentSizeMB(0.2 + r.nextDouble() * 2.0);
} else {
mergePolicy.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
}
}
public static TieredMergePolicy newTieredMergePolicy(Random r) {
TieredMergePolicy tmp = new TieredMergePolicy();
if (rarely(r)) {
tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 2, 9));
tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 2, 9));
} else {
tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 10, 50));
tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 10, 50));
}
if (rarely(r)) {
tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
} else {
tmp.setMaxMergedSegmentMB(10 + r.nextDouble() * 100);
}
tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
if (rarely(r)) {
tmp.setSegmentsPerTier(TestUtil.nextInt(r, 2, 20));
} else {
tmp.setSegmentsPerTier(TestUtil.nextInt(r, 10, 50));
}
configureRandom(r, tmp);
tmp.setDeletesPctAllowed(20 + random().nextDouble() * 30);
return tmp;
}
public static MergePolicy newLogMergePolicy(boolean useCFS) {
MergePolicy logmp = newLogMergePolicy();
logmp.setNoCFSRatio(useCFS ? 1.0 : 0.0);
return logmp;
}
public static MergePolicy newLogMergePolicy(boolean useCFS, int mergeFactor) {
LogMergePolicy logmp = newLogMergePolicy();
logmp.setNoCFSRatio(useCFS ? 1.0 : 0.0);
logmp.setMergeFactor(mergeFactor);
return logmp;
}
public static MergePolicy newLogMergePolicy(int mergeFactor) {
LogMergePolicy logmp = newLogMergePolicy();
logmp.setMergeFactor(mergeFactor);
return logmp;
}
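// Usage sketch (illustrative only, not part of the original file): tests typically plug one of
// the randomized merge policies above into an IndexWriterConfig, e.g.
//
//   IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
//   iwc.setMergePolicy(newMergePolicy());
//   IndexWriter w = new IndexWriter(newDirectory(), iwc);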
// If a setting is exposed in LiveIndexWriterConfig, it must (and will) be randomly exercised here.
public static void maybeChangeLiveIndexWriterConfig(Random r, LiveIndexWriterConfig c) {
boolean didChange = false;
String previous = c.toString();
if (rarely(r)) {
// change flush parameters:
// this is complicated because the api requires you "invoke setters in a magical order!"
// LUCENE-5661: workaround for race conditions in the API
synchronized (c) {
boolean flushByRAM;
switch (liveIWCFlushMode) {
case BY_RAM:
flushByRAM = true;
break;
case BY_DOCS:
flushByRAM = false;
break;
case EITHER:
flushByRAM = r.nextBoolean();
break;
default:
throw new AssertionError();
}
if (flushByRAM) {
c.setRAMBufferSizeMB(TestUtil.nextInt(r, 1, 10));
c.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else {
if (rarely(r)) {
// crazy value
c.setMaxBufferedDocs(TestUtil.nextInt(r, 2, 15));
} else {
// reasonable value
c.setMaxBufferedDocs(TestUtil.nextInt(r, 16, 1000));
}
c.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
}
}
didChange = true;
}
if (rarely(r)) {
IndexWriter.IndexReaderWarmer curWarmer = c.getMergedSegmentWarmer();
if (curWarmer == null || curWarmer instanceof SimpleMergedSegmentWarmer) {
// change warmer parameters
if (r.nextBoolean()) {
c.setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.getInfoStream()));
} else {
c.setMergedSegmentWarmer(null);
}
}
didChange = true;
}
if (rarely(r)) {
// change CFS flush parameters
c.setUseCompoundFile(r.nextBoolean());
didChange = true;
}
if (rarely(r)) {
// change CMS merge parameters
MergeScheduler ms = c.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler) {
ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) ms;
int maxThreadCount = TestUtil.nextInt(r, 1, 4);
int maxMergeCount = TestUtil.nextInt(r, maxThreadCount, maxThreadCount + 4);
boolean enableAutoIOThrottle = random().nextBoolean();
if (enableAutoIOThrottle) {
cms.enableAutoIOThrottle();
} else {
cms.disableAutoIOThrottle();
}
cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
didChange = true;
}
}
if (rarely(r)) {
MergePolicy mp = c.getMergePolicy();
configureRandom(r, mp);
if (mp instanceof LogMergePolicy) {
LogMergePolicy logmp = (LogMergePolicy) mp;
logmp.setCalibrateSizeByDeletes(r.nextBoolean());
if (rarely(r)) {
logmp.setMergeFactor(TestUtil.nextInt(r, 2, 9));
} else {
logmp.setMergeFactor(TestUtil.nextInt(r, 10, 50));
}
} else if (mp instanceof TieredMergePolicy) {
TieredMergePolicy tmp = (TieredMergePolicy) mp;
if (rarely(r)) {
tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 2, 9));
tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 2, 9));
} else {
tmp.setMaxMergeAtOnce(TestUtil.nextInt(r, 10, 50));
tmp.setMaxMergeAtOnceExplicit(TestUtil.nextInt(r, 10, 50));
}
if (rarely(r)) {
tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
} else {
tmp.setMaxMergedSegmentMB(r.nextDouble() * 100);
}
tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
tmp.setForceMergeDeletesPctAllowed(0.0 + r.nextDouble() * 30.0);
if (rarely(r)) {
tmp.setSegmentsPerTier(TestUtil.nextInt(r, 2, 20));
} else {
tmp.setSegmentsPerTier(TestUtil.nextInt(r, 10, 50));
}
configureRandom(r, tmp);
tmp.setDeletesPctAllowed(20 + random().nextDouble() * 30);
}
didChange = true;
}
if (VERBOSE && didChange) {
String current = c.toString();
String previousLines[] = previous.split("\n");
String currentLines[] = current.split("\n");
StringBuilder diff = new StringBuilder();
// this should always be the case, diff each line
if (previousLines.length == currentLines.length) {
for (int i = 0; i < previousLines.length; i++) {
if (!previousLines[i].equals(currentLines[i])) {
diff.append("- ").append(previousLines[i]).append("\n");
diff.append("+ ").append(currentLines[i]).append("\n");
}
}
} else {
// but just in case of something ridiculous...
diff.append(current.toString());
}
// it's possible to be empty, if we "change" a value to what it had before.
if (diff.length() > 0) {
System.out.println("NOTE: LuceneTestCase: randomly changed IWC's live settings:");
System.out.println(diff);
}
}
}
/**
* Returns a new Directory instance. Use this when the test does not
* care about the specific Directory implementation (most tests).
* <p>
* The Directory is wrapped with {@link BaseDirectoryWrapper}.
* This means it will usually be picky, such as ensuring that you
* properly close it and all open files in your test. It will emulate
* some features of Windows, such as not allowing open files to be
* overwritten.
*/
public static BaseDirectoryWrapper newDirectory() {
return newDirectory(random());
}
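// Usage sketch for newDirectory() above (illustrative only): the returned wrapper verifies that
// the test closes the directory and everything opened from it, so a typical pattern is
//
//   Directory dir = newDirectory();
//   IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
//   w.addDocument(new Document());
//   w.close();
//   dir.close();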
/** Like {@link #newDirectory}, except that the {@link VirusCheckingFS} may randomly be installed */
public static BaseDirectoryWrapper newMaybeVirusCheckingDirectory() {
if (random().nextInt(5) == 4) {
Path path = addVirusChecker(createTempDir());
return newFSDirectory(path);
} else {
return newDirectory(random());
}
}
/**
* Returns a new Directory instance, using the specified random.
* See {@link #newDirectory()} for more information.
*/
public static BaseDirectoryWrapper newDirectory(Random r) {
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), rarely(r), false);
}
/**
* Returns a new Directory instance, using the specified random.
* See {@link #newDirectory()} for more information.
*/
public static BaseDirectoryWrapper newDirectory(Random r, LockFactory lf) {
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), rarely(r), false);
}
public static MockDirectoryWrapper newMockDirectory() {
return newMockDirectory(random());
}
public static MockDirectoryWrapper newMockDirectory(Random r) {
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), false, false);
}
public static MockDirectoryWrapper newMockDirectory(Random r, LockFactory lf) {
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), false, false);
}
public static MockDirectoryWrapper newMockFSDirectory(Path f) {
return (MockDirectoryWrapper) newFSDirectory(f, FSLockFactory.getDefault(), false);
}
public static MockDirectoryWrapper newMockFSDirectory(Path f, LockFactory lf) {
return (MockDirectoryWrapper) newFSDirectory(f, lf, false);
}
public static Path addVirusChecker(Path path) {
if (TestUtil.hasVirusChecker(path) == false) {
VirusCheckingFS fs = new VirusCheckingFS(path.getFileSystem(), random().nextLong());
FileSystem filesystem = fs.getFileSystem(URI.create("file:///"));
path = new FilterPath(path, filesystem);
}
return path;
}
/**
* Returns a new Directory instance, with contents copied from the
* provided directory. See {@link #newDirectory()} for more
* information.
*/
public static BaseDirectoryWrapper newDirectory(Directory d) throws IOException {
return newDirectory(random(), d);
}
/** Returns a new FSDirectory instance over the given file, which must be a folder. */
public static BaseDirectoryWrapper newFSDirectory(Path f) {
return newFSDirectory(f, FSLockFactory.getDefault());
}
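// Usage sketch for newFSDirectory(Path) above (illustrative only; "my-index" is an arbitrary name):
//
//   Path indexPath = createTempDir("my-index");
//   Directory dir = newFSDirectory(indexPath);
//   // ... use the directory ...
//   dir.close();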
/** Like {@link #newFSDirectory(Path)}, but may randomly insert {@link VirusCheckingFS} */
public static BaseDirectoryWrapper newMaybeVirusCheckingFSDirectory(Path f) {
if (random().nextInt(5) == 4) {
f = addVirusChecker(f);
}
return newFSDirectory(f, FSLockFactory.getDefault());
}
/** Returns a new FSDirectory instance over the given file, which must be a folder. */
public static BaseDirectoryWrapper newFSDirectory(Path f, LockFactory lf) {
return newFSDirectory(f, lf, rarely());
}
private static BaseDirectoryWrapper newFSDirectory(Path f, LockFactory lf, boolean bare) {
String fsdirClass = TEST_DIRECTORY;
if (fsdirClass.equals("random")) {
fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
}
Class<? extends FSDirectory> clazz;
try {
try {
clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass);
} catch (ClassCastException e) {
// TEST_DIRECTORY is not a sub-class of FSDirectory, so draw one at random
fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass);
}
Directory fsdir = newFSDirectoryImpl(clazz, f, lf);
BaseDirectoryWrapper wrapped = wrapDirectory(random(), fsdir, bare, true);
return wrapped;
} catch (Exception e) {
Rethrow.rethrow(e);
throw null; // dummy to prevent compiler failure
}
}
private static Directory newFileSwitchDirectory(Random random, Directory dir1, Directory dir2) {
List<String> fileExtensions =
Arrays.asList("fdt", "fdx", "tim", "tip", "si", "fnm", "pos", "dii", "dim", "nvm", "nvd", "dvm", "dvd");
Collections.shuffle(fileExtensions, random);
fileExtensions = fileExtensions.subList(0, 1 + random.nextInt(fileExtensions.size()));
return new FileSwitchDirectory(new HashSet<>(fileExtensions), dir1, dir2, true);
}
/**
* Returns a new Directory instance, using the specified random
* with contents copied from the provided directory. See
* {@link #newDirectory()} for more information.
*/
public static BaseDirectoryWrapper newDirectory(Random r, Directory d) throws IOException {
Directory impl = newDirectoryImpl(r, TEST_DIRECTORY);
for (String file : d.listAll()) {
if (file.startsWith(IndexFileNames.SEGMENTS) || IndexFileNames.CODEC_FILE_PATTERN.matcher(file).matches()) {
impl.copyFrom(d, file, file, newIOContext(r));
}
}
return wrapDirectory(r, impl, rarely(r), false);
}
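// Wraps a raw Directory for tests: rarely layers an NRTCachingDirectory on top (unless the
// directory is filesystem-backed or "bare" was requested), then returns either a
// RawDirectoryWrapper (bare) or a throttled MockDirectoryWrapper, both closed after the suite.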
private static BaseDirectoryWrapper wrapDirectory(Random random, Directory directory, boolean bare, boolean filesystem) {
// IOContext randomization might make NRTCachingDirectory make bad decisions, so avoid
// using it if the user requested a filesystem directory.
if (rarely(random) && !bare && filesystem == false) {
directory = new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
}
if (bare) {
BaseDirectoryWrapper base = new RawDirectoryWrapper(directory);
closeAfterSuite(new CloseableDirectory(base, suiteFailureMarker));
return base;
} else {
MockDirectoryWrapper mock = new MockDirectoryWrapper(random, directory);
mock.setThrottling(TEST_THROTTLING);
closeAfterSuite(new CloseableDirectory(mock, suiteFailureMarker));
return mock;
}
}
public static Field newStringField(String name, String value, Store stored) {
return newField(random(), name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED);
}
public static Field newStringField(String name, BytesRef value, Store stored) {
return newField(random(), name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED);
}
public static Field newTextField(String name, String value, Store stored) {
return newField(random(), name, value, stored == Store.YES ? TextField.TYPE_STORED : TextField.TYPE_NOT_STORED);
}
public static Field newStringField(Random random, String name, String value, Store stored) {
return newField(random, name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED);
}
public static Field newStringField(Random random, String name, BytesRef value, Store stored) {
return newField(random, name, value, stored == Store.YES ? StringField.TYPE_STORED : StringField.TYPE_NOT_STORED);
}
public static Field newTextField(Random random, String name, String value, Store stored) {
return newField(random, name, value, stored == Store.YES ? TextField.TYPE_STORED : TextField.TYPE_NOT_STORED);
}
public static Field newField(String name, String value, FieldType type) {
return newField(random(), name, value, type);
}
/** Returns a FieldType derived from newType but whose
* term vector options match the old type */
private static FieldType mergeTermVectorOptions(FieldType newType, FieldType oldType) {
if (newType.indexOptions() != IndexOptions.NONE && oldType.storeTermVectors() == true && newType.storeTermVectors() == false) {
newType = new FieldType(newType);
newType.setStoreTermVectors(oldType.storeTermVectors());
newType.setStoreTermVectorPositions(oldType.storeTermVectorPositions());
newType.setStoreTermVectorOffsets(oldType.storeTermVectorOffsets());
newType.setStoreTermVectorPayloads(oldType.storeTermVectorPayloads());
newType.freeze();
}
return newType;
}
// TODO: if we can pull out the "make term vector options
// consistent across all instances of the same field name"
// write-once schema sort of helper class then we can
// remove the sync here. We can also fold the random
// "enable norms" (now commented out, below) into that:
public synchronized static Field newField(Random random, String name, Object value, FieldType type) {
// Defeat any consumers that illegally rely on intern'd
// strings (we removed this from Lucene a while back):
name = new String(name);
FieldType prevType = fieldToType.get(name);
if (usually(random) || type.indexOptions() == IndexOptions.NONE || prevType != null) {
// most of the time, don't modify the params
if (prevType == null) {
fieldToType.put(name, new FieldType(type));
} else {
type = mergeTermVectorOptions(type, prevType);
}
return createField(name, value, type);
}
// TODO: once all core & test codecs can index
// offsets, sometimes randomly turn on offsets if we are
// already indexing positions...
FieldType newType = new FieldType(type);
if (!newType.stored() && random.nextBoolean()) {
newType.setStored(true); // randomly store it
}
// Randomly turn on term vector options, but always do
// so consistently for the same field name:
if (!newType.storeTermVectors() && random.nextBoolean()) {
newType.setStoreTermVectors(true);
if (!newType.storeTermVectorPositions()) {
newType.setStoreTermVectorPositions(random.nextBoolean());
if (newType.storeTermVectorPositions()) {
if (!newType.storeTermVectorPayloads()) {
newType.setStoreTermVectorPayloads(random.nextBoolean());
}
}
}
if (!newType.storeTermVectorOffsets()) {
newType.setStoreTermVectorOffsets(random.nextBoolean());
}
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase: upgrade name=" + name + " type=" + newType);
}
}
newType.freeze();
fieldToType.put(name, newType);
// TODO: we need to do this, but smarter, ie, most of
// the time we set the same value for a given field but
// sometimes (rarely) we change it up:
/*
if (newType.omitNorms()) {
newType.setOmitNorms(random.nextBoolean());
}
*/
return createField(name, value, newType);
}
private static Field createField(String name, Object value, FieldType fieldType) {
if (value instanceof String) {
return new Field(name, (String) value, fieldType);
} else if (value instanceof BytesRef) {
return new Field(name, (BytesRef) value, fieldType);
} else {
throw new IllegalArgumentException("value must be String or BytesRef");
}
}
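// Usage sketch for the newField/newStringField/newTextField helpers above (illustrative only;
// "writer" is an assumed, already-open IndexWriter): tests build documents through these
// helpers so field types get randomized consistently per field name, e.g.
//
//   Document doc = new Document();
//   doc.add(newStringField("id", "1", Field.Store.YES));
//   doc.add(newTextField("body", "some random text", Field.Store.NO));
//   writer.addDocument(doc);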
private static final String[] availableLanguageTags = Arrays.stream(Locale.getAvailableLocales())
.map(Locale::toLanguageTag)
.sorted()
.distinct()
.toArray(String[]::new);
/**
* Return a random Locale from the available locales on the system.
* @see <a href="http://issues.apache.org/jira/browse/LUCENE-4020">LUCENE-4020</a>
*/
public static Locale randomLocale(Random random) {
return localeForLanguageTag(availableLanguageTags[random.nextInt(availableLanguageTags.length)]);
}
/**
* Return a random TimeZone from the available timezones on the system
* @see <a href="http://issues.apache.org/jira/browse/LUCENE-4020">LUCENE-4020</a>
*/
public static TimeZone randomTimeZone(Random random) {
String tzIds[] = TimeZone.getAvailableIDs();
return TimeZone.getTimeZone(tzIds[random.nextInt(tzIds.length)]);
}
/** return a Locale object equivalent to its programmatic name */
public static Locale localeForLanguageTag(String languageTag) {
return new Locale.Builder().setLanguageTag(languageTag).build();
}
private static Directory newFSDirectoryImpl(Class<? extends FSDirectory> clazz, Path path, LockFactory lf) throws IOException {
FSDirectory d = null;
try {
d = CommandLineUtil.newFSDirectory(clazz, path, lf);
} catch (ReflectiveOperationException e) {
Rethrow.rethrow(e);
}
return d;
}
static Directory newDirectoryImpl(Random random, String clazzName) {
return newDirectoryImpl(random, clazzName, FSLockFactory.getDefault());
}
static Directory newDirectoryImpl(Random random, String clazzName, LockFactory lf) {
if (clazzName.equals("random")) {
if (rarely(random)) {
clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
} else if (rarely(random)) {
String clazzName1 = rarely(random)
? RandomPicks.randomFrom(random, CORE_DIRECTORIES)
: ByteBuffersDirectory.class.getName();
String clazzName2 = rarely(random)
? RandomPicks.randomFrom(random, CORE_DIRECTORIES)
: ByteBuffersDirectory.class.getName();
return newFileSwitchDirectory(random,
newDirectoryImpl(random, clazzName1, lf),
newDirectoryImpl(random, clazzName2, lf));
} else {
clazzName = ByteBuffersDirectory.class.getName();
}
}
try {
final Class<? extends Directory> clazz = CommandLineUtil.loadDirectoryClass(clazzName);
// If it is a FSDirectory type, try its ctor(Path)
if (FSDirectory.class.isAssignableFrom(clazz)) {
final Path dir = createTempDir("index-" + clazzName);
return newFSDirectoryImpl(clazz.asSubclass(FSDirectory.class), dir, lf);
}
// See if it has a Path/LockFactory ctor even though it's not an
// FSDir subclass:
try {
Constructor<? extends Directory> pathCtor = clazz.getConstructor(Path.class, LockFactory.class);
final Path dir = createTempDir("index");
return pathCtor.newInstance(dir, lf);
} catch (NoSuchMethodException nsme) {
// Ignore
}
// the remaining dirs are no longer filesystem based, so we must check that the passed LockFactory is not file based:
if (!(lf instanceof FSLockFactory)) {
// try ctor with only LockFactory
try {
return clazz.getConstructor(LockFactory.class).newInstance(lf);
} catch (NoSuchMethodException nsme) {
// Ignore
}
}
// try empty ctor
return clazz.getConstructor().newInstance();
} catch (Exception e) {
Rethrow.rethrow(e);
throw null; // dummy to prevent compiler failure
}
}
public static IndexReader wrapReader(IndexReader r) throws IOException {
Random random = random();
for (int i = 0, c = random.nextInt(6)+1; i < c; i++) {
switch(random.nextInt(5)) {
case 0:
// will create no FC insanity in atomic case, as ParallelLeafReader has own cache key:
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with ParallelLeaf/CompositeReader");
}
r = (r instanceof LeafReader) ?
new ParallelLeafReader((LeafReader) r) :
new ParallelCompositeReader((CompositeReader) r);
break;
case 1:
if (r instanceof LeafReader) {
final LeafReader ar = (LeafReader) r;
final List<String> allFields = new ArrayList<>();
for (FieldInfo fi : ar.getFieldInfos()) {
allFields.add(fi.name);
}
Collections.shuffle(allFields, random);
final int end = allFields.isEmpty() ? 0 : random.nextInt(allFields.size());
final Set<String> fields = new HashSet<>(allFields.subList(0, end));
// will create no FC insanity as ParallelLeafReader has own cache key:
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with ParallelLeafReader");
}
r = new ParallelLeafReader(
new FieldFilterLeafReader(ar, fields, false),
new FieldFilterLeafReader(ar, fields, true)
);
}
break;
case 2:
// Häckidy-Hick-Hack: a standard Reader will cause FC insanity, so we use
// QueryUtils' reader with a fake cache key, so insanity checker cannot walk
// along our reader:
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with AssertingLeaf/DirectoryReader");
}
if (r instanceof LeafReader) {
r = new AssertingLeafReader((LeafReader)r);
} else if (r instanceof DirectoryReader) {
r = new AssertingDirectoryReader((DirectoryReader)r);
}
break;
case 3:
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with MismatchedLeaf/DirectoryReader");
}
if (r instanceof LeafReader) {
r = new MismatchedLeafReader((LeafReader)r, random);
} else if (r instanceof DirectoryReader) {
r = new MismatchedDirectoryReader((DirectoryReader)r, random);
}
break;
case 4:
if (VERBOSE) {
System.out.println("NOTE: LuceneTestCase.wrapReader: wrapping previous reader=" + r + " with MergingCodecReader");
}
if (r instanceof CodecReader) {
r = new MergingCodecReader((CodecReader) r);
} else if (r instanceof DirectoryReader) {
boolean allLeavesAreCodecReaders = true;
for (LeafReaderContext ctx : r.leaves()) {
if (ctx.reader() instanceof CodecReader == false) {
allLeavesAreCodecReaders = false;
break;
}
}
if (allLeavesAreCodecReaders) {
r = new MergingDirectoryReaderWrapper((DirectoryReader) r);
}
}
break;
default:
fail("should not get here");
}
}
if (VERBOSE) {
System.out.println("wrapReader wrapped: " +r);
}
return r;
}
/**
* Sometimes wrap the IndexReader as slow, parallel or filter reader (or
* combinations of that)
*/
public static IndexReader maybeWrapReader(IndexReader r) throws IOException {
if (rarely()) {
r = wrapReader(r);
}
return r;
}
/** Returns a random {@link IOContext} for newly opened files. */
public static IOContext newIOContext(Random random) {
return newIOContext(random, IOContext.DEFAULT);
}
/** Returns a random {@link IOContext}, preserving at least the size constraints of the given {@code oldContext}. */
public static IOContext newIOContext(Random random, IOContext oldContext) {
final int randomNumDocs = random.nextInt(4192);
final int size = random.nextInt(512) * randomNumDocs;
if (oldContext.flushInfo != null) {
// Always return at least the estimatedSegmentSize of
// the incoming IOContext:
return new IOContext(new FlushInfo(randomNumDocs, Math.max(oldContext.flushInfo.estimatedSegmentSize, size)));
} else if (oldContext.mergeInfo != null) {
// Always return at least the estimatedMergeBytes of
// the incoming IOContext:
return new IOContext(new MergeInfo(randomNumDocs, Math.max(oldContext.mergeInfo.estimatedMergeBytes, size), random.nextBoolean(), TestUtil.nextInt(random, 1, 100)));
} else {
// Make a totally random IOContext:
final IOContext context;
switch (random.nextInt(5)) {
case 0:
context = IOContext.DEFAULT;
break;
case 1:
context = IOContext.READ;
break;
case 2:
context = IOContext.READONCE;
break;
case 3:
context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1));
break;
case 4:
context = new IOContext(new FlushInfo(randomNumDocs, size));
break;
default:
context = IOContext.DEFAULT;
}
return context;
}
}
private static final QueryCache DEFAULT_QUERY_CACHE = IndexSearcher.getDefaultQueryCache();
private static final QueryCachingPolicy DEFAULT_CACHING_POLICY = IndexSearcher.getDefaultQueryCachingPolicy();
@Before
public void overrideTestDefaultQueryCache() {
// Make sure each test method has its own cache
overrideDefaultQueryCache();
}
@BeforeClass
public static void overrideDefaultQueryCache() {
// we need to reset the query cache in an @BeforeClass so that tests that
// instantiate an IndexSearcher in an @BeforeClass method use a fresh new cache
IndexSearcher.setDefaultQueryCache(new LRUQueryCache(10000, 1 << 25, context -> true, Float.POSITIVE_INFINITY));
IndexSearcher.setDefaultQueryCachingPolicy(MAYBE_CACHE_POLICY);
}
@AfterClass
public static void resetDefaultQueryCache() {
IndexSearcher.setDefaultQueryCache(DEFAULT_QUERY_CACHE);
IndexSearcher.setDefaultQueryCachingPolicy(DEFAULT_CACHING_POLICY);
}
@BeforeClass
public static void setupCPUCoreCount() {
// Randomize core count so CMS varies its dynamic defaults, and this also "fixes" core
// count from the master seed so it will always be the same on reproduce:
int numCores = TestUtil.nextInt(random(), 1, 4);
System.setProperty(ConcurrentMergeScheduler.DEFAULT_CPU_CORE_COUNT_PROPERTY, Integer.toString(numCores));
}
@AfterClass
public static void restoreCPUCoreCount() {
System.clearProperty(ConcurrentMergeScheduler.DEFAULT_CPU_CORE_COUNT_PROPERTY);
}
@BeforeClass
public static void setupSpins() {
// Randomize IOUtils.spins() so CMS varies its dynamic defaults, and this also "fixes" the
// spins value from the master seed so it will always be the same on reproduce:
boolean spins = random().nextBoolean();
System.setProperty(ConcurrentMergeScheduler.DEFAULT_SPINS_PROPERTY, Boolean.toString(spins));
}
@AfterClass
public static void restoreSpins() {
System.clearProperty(ConcurrentMergeScheduler.DEFAULT_SPINS_PROPERTY);
}
/**
* Create a new searcher over the reader. This searcher might randomly use
* threads.
*/
public static IndexSearcher newSearcher(IndexReader r) {
return newSearcher(r, true);
}
/**
* Create a new searcher over the reader. This searcher might randomly use
* threads.
*/
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) {
return newSearcher(r, maybeWrap, true);
}
/**
* Create a new searcher over the reader. This searcher might randomly use
* threads. If <code>maybeWrap</code> is true, this searcher might wrap the
* reader with one that returns null for getSequentialSubReaders. If
* <code>wrapWithAssertions</code> is true, this searcher might be an
* {@link AssertingIndexSearcher} instance.
*/
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap, boolean wrapWithAssertions) {
Random random = random();
if (usually()) {
if (maybeWrap) {
try {
r = maybeWrapReader(r);
} catch (IOException e) {
Rethrow.rethrow(e);
}
}
// TODO: this whole check is a coverage hack, we should move it to tests for various filterreaders.
// ultimately whatever you do will be checkIndex'd at the end anyway.
if (random.nextInt(500) == 0 && r instanceof LeafReader) {
// TODO: not useful to check DirectoryReader (redundant with checkindex)
// but maybe sometimes run this on the other crazy readers maybeWrapReader creates?
try {
TestUtil.checkReader(r);
} catch (IOException e) {
Rethrow.rethrow(e);
}
}
final IndexSearcher ret;
if (wrapWithAssertions) {
ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext());
} else {
ret = random.nextBoolean() ? new IndexSearcher(r) : new IndexSearcher(r.getContext());
}
ret.setSimilarity(classEnvRule.similarity);
return ret;
} else {
int threads = 0;
final ThreadPoolExecutor ex;
if (r.getReaderCacheHelper() == null || random.nextBoolean()) {
ex = null;
} else {
threads = TestUtil.nextInt(random, 1, 8);
ex = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>(),
new NamedThreadFactory("LuceneTestCase"));
// uncomment to intensify LUCENE-3840
// ex.prestartAllCoreThreads();
}
if (ex != null) {
if (VERBOSE) {
System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads");
}
r.getReaderCacheHelper().addClosedListener(cacheKey -> TestUtil.shutdownExecutorService(ex));
}
IndexSearcher ret;
if (wrapWithAssertions) {
ret = random.nextBoolean()
? new AssertingIndexSearcher(random, r, ex)
: new AssertingIndexSearcher(random, r.getContext(), ex);
} else if (random.nextBoolean()) {
int maxDocPerSlice = 1 + random.nextInt(100000);
int maxSegmentsPerSlice = 1 + random.nextInt(20);
ret = new IndexSearcher(r, ex) {
@Override
protected LeafSlice[] slices(List<LeafReaderContext> leaves) {
return slices(leaves, maxDocPerSlice, maxSegmentsPerSlice);
}
};
} else {
ret = random.nextBoolean()
? new IndexSearcher(r, ex)
: new IndexSearcher(r.getContext(), ex);
}
ret.setSimilarity(classEnvRule.similarity);
ret.setQueryCachingPolicy(MAYBE_CACHE_POLICY);
return ret;
}
}
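// Usage sketch for the newSearcher(...) helpers above (illustrative only; "dir" is an assumed,
// already-populated Directory): a typical test opens a reader and lets the framework decide on
// wrapping, threading and assertions:
//
//   IndexReader r = DirectoryReader.open(dir);
//   IndexSearcher searcher = newSearcher(r);
//   assertEquals(1, searcher.count(new TermQuery(new Term("id", "1"))));
//   r.close();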
/**
* Gets a resource from the test's classpath as {@link Path}. This method should only
* be used if a real file is needed. To get a stream, code should prefer
* {@link #getDataInputStream(String)}.
*/
protected Path getDataPath(String name) throws IOException {
try {
return Paths.get(this.getClass().getResource(name).toURI());
} catch (Exception e) {
throw new IOException("Cannot find resource: " + name);
}
}
/**
* Gets a resource from the test's classpath as {@link InputStream}.
*/
protected InputStream getDataInputStream(String name) throws IOException {
InputStream in = this.getClass().getResourceAsStream(name);
if (in == null) {
throw new IOException("Cannot find resource: " + name);
}
return in;
}
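// Usage sketch for getDataPath/getDataInputStream above (illustrative only; "testdata.txt" is a
// hypothetical resource name):
//
//   try (InputStream in = getDataInputStream("testdata.txt")) {
//     // read the resource ...
//   }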
public void assertReaderEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
assertReaderStatisticsEquals(info, leftReader, rightReader);
assertTermsEquals(info, leftReader, rightReader, true);
assertNormsEquals(info, leftReader, rightReader);
assertStoredFieldsEquals(info, leftReader, rightReader);
assertTermVectorsEquals(info, leftReader, rightReader);
assertDocValuesEquals(info, leftReader, rightReader);
assertDeletedDocsEquals(info, leftReader, rightReader);
assertFieldInfosEquals(info, leftReader, rightReader);
assertPointsEquals(info, leftReader, rightReader);
}
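// Usage sketch for assertReaderEquals above (illustrative only; "dir" is an assumed Directory):
// two readers over the same commit should pass every check that follows.
//
//   DirectoryReader r1 = DirectoryReader.open(dir);
//   DirectoryReader r2 = DirectoryReader.open(dir);
//   assertReaderEquals("same commit", r1, r2);
//   r1.close();
//   r2.close();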
/**
* checks that reader-level statistics are the same
*/
public void assertReaderStatisticsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
// Somewhat redundant: we never delete docs
assertEquals(info, leftReader.maxDoc(), rightReader.maxDoc());
assertEquals(info, leftReader.numDocs(), rightReader.numDocs());
assertEquals(info, leftReader.numDeletedDocs(), rightReader.numDeletedDocs());
assertEquals(info, leftReader.hasDeletions(), rightReader.hasDeletions());
}
/**
* Fields api equivalency
*/
public void assertTermsEquals(String info, IndexReader leftReader, IndexReader rightReader, boolean deep) throws IOException {
Set<String> leftFields = new HashSet<>(FieldInfos.getIndexedFields(leftReader));
Set<String> rightFields = new HashSet<>(FieldInfos.getIndexedFields(rightReader));
assertEquals(info, leftFields, rightFields);
for (String field : leftFields) {
assertTermsEquals(info, leftReader, MultiTerms.getTerms(leftReader, field), MultiTerms.getTerms(rightReader, field), deep);
}
}
/**
* Terms api equivalency
*/
public void assertTermsEquals(String info, IndexReader leftReader, Terms leftTerms, Terms rightTerms, boolean deep) throws IOException {
if (leftTerms == null || rightTerms == null) {
assertNull(info, leftTerms);
assertNull(info, rightTerms);
return;
}
assertTermsStatisticsEquals(info, leftTerms, rightTerms);
assertEquals("hasOffsets", leftTerms.hasOffsets(), rightTerms.hasOffsets());
assertEquals("hasPositions", leftTerms.hasPositions(), rightTerms.hasPositions());
assertEquals("hasPayloads", leftTerms.hasPayloads(), rightTerms.hasPayloads());
TermsEnum leftTermsEnum = leftTerms.iterator();
TermsEnum rightTermsEnum = rightTerms.iterator();
assertTermsEnumEquals(info, leftReader, leftTermsEnum, rightTermsEnum, true);
assertTermsSeekingEquals(info, leftTerms, rightTerms);
if (deep) {
int numIntersections = atLeast(3);
for (int i = 0; i < numIntersections; i++) {
String re = AutomatonTestUtil.randomRegexp(random());
CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton());
if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
// TODO: test start term too
TermsEnum leftIntersection = leftTerms.intersect(automaton, null);
TermsEnum rightIntersection = rightTerms.intersect(automaton, null);
assertTermsEnumEquals(info, leftReader, leftIntersection, rightIntersection, rarely());
}
}
}
}
/**
* checks collection-level statistics on Terms
*/
public void assertTermsStatisticsEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException {
assertEquals(info, leftTerms.getDocCount(), rightTerms.getDocCount());
assertEquals(info, leftTerms.getSumDocFreq(), rightTerms.getSumDocFreq());
assertEquals(info, leftTerms.getSumTotalTermFreq(), rightTerms.getSumTotalTermFreq());
if (leftTerms.size() != -1 && rightTerms.size() != -1) {
assertEquals(info, leftTerms.size(), rightTerms.size());
}
}
private static class RandomBits implements Bits {
FixedBitSet bits;
RandomBits(int maxDoc, double pctLive, Random random) {
bits = new FixedBitSet(maxDoc);
for (int i = 0; i < maxDoc; i++) {
if (random.nextDouble() <= pctLive) {
bits.set(i);
}
}
}
@Override
public boolean get(int index) {
return bits.get(index);
}
@Override
public int length() {
return bits.length();
}
}
/**
* checks the terms enum sequentially
* if deep is false, it does a 'shallow' test that doesn't go down to the docs enums
*/
public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException {
BytesRef term;
PostingsEnum leftPositions = null;
PostingsEnum rightPositions = null;
PostingsEnum leftDocs = null;
PostingsEnum rightDocs = null;
while ((term = leftTermsEnum.next()) != null) {
assertEquals(info, term, rightTermsEnum.next());
assertTermStatsEquals(info, leftTermsEnum, rightTermsEnum);
if (deep) {
assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.ALL),
rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL));
assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.ALL),
rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL));
// with freqs:
assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(leftDocs),
rightDocs = rightTermsEnum.postings(rightDocs),
true);
// w/o freqs:
assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(leftDocs, PostingsEnum.NONE),
rightDocs = rightTermsEnum.postings(rightDocs, PostingsEnum.NONE),
false);
// with freqs:
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
leftDocs = leftTermsEnum.postings(leftDocs),
rightDocs = rightTermsEnum.postings(rightDocs),
true);
// w/o freqs:
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
leftDocs = leftTermsEnum.postings(leftDocs, PostingsEnum.NONE),
rightDocs = rightTermsEnum.postings(rightDocs, PostingsEnum.NONE),
false);
}
}
assertNull(info, rightTermsEnum.next());
}
/**
* checks docs + freqs + positions + payloads, sequentially
*/
public void assertDocsAndPositionsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException {
assertNotNull(leftDocs);
assertNotNull(rightDocs);
assertEquals(info, -1, leftDocs.docID());
assertEquals(info, -1, rightDocs.docID());
int docid;
while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(info, docid, rightDocs.nextDoc());
int freq = leftDocs.freq();
assertEquals(info, freq, rightDocs.freq());
for (int i = 0; i < freq; i++) {
assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
assertEquals(info, leftDocs.startOffset(), rightDocs.startOffset());
assertEquals(info, leftDocs.endOffset(), rightDocs.endOffset());
}
}
assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
}
/**
* checks docs + freqs, sequentially
*/
public void assertDocsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException {
if (leftDocs == null) {
assertNull(rightDocs);
return;
}
assertEquals(info, -1, leftDocs.docID());
assertEquals(info, -1, rightDocs.docID());
int docid;
while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(info, docid, rightDocs.nextDoc());
if (hasFreqs) {
assertEquals(info, leftDocs.freq(), rightDocs.freq());
}
}
assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
}
/**
* checks advancing docs
*/
public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException {
if (leftDocs == null) {
assertNull(rightDocs);
return;
}
int docid = -1;
int averageGap = leftReader.maxDoc() / (1+docFreq);
int skipInterval = 16;
while (true) {
if (random().nextBoolean()) {
// nextDoc()
docid = leftDocs.nextDoc();
assertEquals(info, docid, rightDocs.nextDoc());
} else {
// advance()
int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
docid = leftDocs.advance(skip);
assertEquals(info, docid, rightDocs.advance(skip));
}
if (docid == DocIdSetIterator.NO_MORE_DOCS) {
return;
}
if (hasFreqs) {
assertEquals(info, leftDocs.freq(), rightDocs.freq());
}
}
}
/**
* checks advancing docs + positions
*/
public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
return;
}
int docid = -1;
int averageGap = leftReader.maxDoc() / (1+docFreq);
int skipInterval = 16;
while (true) {
if (random().nextBoolean()) {
// nextDoc()
docid = leftDocs.nextDoc();
assertEquals(info, docid, rightDocs.nextDoc());
} else {
// advance()
int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
docid = leftDocs.advance(skip);
assertEquals(info, docid, rightDocs.advance(skip));
}
if (docid == DocIdSetIterator.NO_MORE_DOCS) {
return;
}
int freq = leftDocs.freq();
assertEquals(info, freq, rightDocs.freq());
for (int i = 0; i < freq; i++) {
assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
}
}
}
private void assertTermsSeekingEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException {
// just an upper bound
int numTests = atLeast(20);
Random random = random();
TermsEnum leftEnum = null;
// collect this number of terms from the left side
HashSet<BytesRef> tests = new HashSet<>();
int numPasses = 0;
while (numPasses < 10 && tests.size() < numTests) {
leftEnum = leftTerms.iterator();
BytesRef term = null;
while ((term = leftEnum.next()) != null) {
int code = random.nextInt(10);
if (code == 0) {
// the term
tests.add(BytesRef.deepCopyOf(term));
} else if (code == 1) {
// truncated subsequence of term
term = BytesRef.deepCopyOf(term);
if (term.length > 0) {
// truncate it
term.length = random.nextInt(term.length);
}
} else if (code == 2) {
// term, but ensure a non-zero offset
byte newbytes[] = new byte[term.length+5];
System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length);
tests.add(new BytesRef(newbytes, 5, term.length));
} else if (code == 3) {
switch (random().nextInt(3)) {
case 0:
tests.add(new BytesRef()); // before the first term
break;
case 1:
tests.add(new BytesRef(new byte[] {(byte) 0xFF, (byte) 0xFF})); // past the last term
break;
case 2:
tests.add(new BytesRef(TestUtil.randomSimpleString(random()))); // random term
break;
default:
throw new AssertionError();
}
}
}
numPasses++;
}
TermsEnum rightEnum = rightTerms.iterator();
ArrayList<BytesRef> shuffledTests = new ArrayList<>(tests);
Collections.shuffle(shuffledTests, random);
for (BytesRef b : shuffledTests) {
if (rarely()) {
// make new enums
leftEnum = leftTerms.iterator();
rightEnum = rightTerms.iterator();
}
final boolean seekExact = random().nextBoolean();
if (seekExact) {
assertEquals(info, leftEnum.seekExact(b), rightEnum.seekExact(b));
} else {
SeekStatus leftStatus = leftEnum.seekCeil(b);
SeekStatus rightStatus = rightEnum.seekCeil(b);
assertEquals(info, leftStatus, rightStatus);
if (leftStatus != SeekStatus.END) {
assertEquals(info, leftEnum.term(), rightEnum.term());
assertTermStatsEquals(info, leftEnum, rightEnum);
}
}
}
}
/**
* checks term-level statistics
*/
public void assertTermStatsEquals(String info, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum) throws IOException {
assertEquals(info, leftTermsEnum.docFreq(), rightTermsEnum.docFreq());
assertEquals(info, leftTermsEnum.totalTermFreq(), rightTermsEnum.totalTermFreq());
}
/**
* checks that norms are the same across all fields
*/
public void assertNormsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
Set<String> leftFields = new HashSet<>(FieldInfos.getIndexedFields(leftReader));
Set<String> rightFields = new HashSet<>(FieldInfos.getIndexedFields(rightReader));
assertEquals(info, leftFields, rightFields);
for (String field : leftFields) {
NumericDocValues leftNorms = MultiDocValues.getNormValues(leftReader, field);
NumericDocValues rightNorms = MultiDocValues.getNormValues(rightReader, field);
if (leftNorms != null && rightNorms != null) {
assertDocValuesEquals(info, leftReader.maxDoc(), leftNorms, rightNorms);
} else {
assertNull(info, leftNorms);
assertNull(info, rightNorms);
}
}
}
/**
* checks that stored fields of all documents are the same
*/
public void assertStoredFieldsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
assert leftReader.maxDoc() == rightReader.maxDoc();
for (int i = 0; i < leftReader.maxDoc(); i++) {
Document leftDoc = leftReader.document(i);
Document rightDoc = rightReader.document(i);
// TODO: I think this is bogus because we don't document what the order should be
// from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
// in whatever way it wants (e.g. maybe it packs related fields together or something)
// To fix this, we sort the fields in both documents by name, but
// we still assume that all instances with same name are in order:
Comparator<IndexableField> comp = new Comparator<IndexableField>() {
@Override
public int compare(IndexableField arg0, IndexableField arg1) {
return arg0.name().compareTo(arg1.name());
}
};
List<IndexableField> leftFields = new ArrayList<>(leftDoc.getFields());
List<IndexableField> rightFields = new ArrayList<>(rightDoc.getFields());
Collections.sort(leftFields, comp);
Collections.sort(rightFields, comp);
Iterator<IndexableField> leftIterator = leftFields.iterator();
Iterator<IndexableField> rightIterator = rightFields.iterator();
while (leftIterator.hasNext()) {
assertTrue(info, rightIterator.hasNext());
assertStoredFieldEquals(info, leftIterator.next(), rightIterator.next());
}
assertFalse(info, rightIterator.hasNext());
}
}
/**
* checks that two stored fields are equivalent
*/
public void assertStoredFieldEquals(String info, IndexableField leftField, IndexableField rightField) {
assertEquals(info, leftField.name(), rightField.name());
assertEquals(info, leftField.binaryValue(), rightField.binaryValue());
assertEquals(info, leftField.stringValue(), rightField.stringValue());
assertEquals(info, leftField.numericValue(), rightField.numericValue());
// TODO: should we check the FT at all?
}
/**
* checks that term vectors across all fields are equivalent
*/
public void assertTermVectorsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
assert leftReader.maxDoc() == rightReader.maxDoc();
for (int i = 0; i < leftReader.maxDoc(); i++) {
Fields leftFields = leftReader.getTermVectors(i);
Fields rightFields = rightReader.getTermVectors(i);
// Fields could be null if there are no postings,
// but then it must be null for both
if (leftFields == null || rightFields == null) {
assertNull(info, leftFields);
assertNull(info, rightFields);
return;
}
if (leftFields.size() != -1 && rightFields.size() != -1) {
assertEquals(info, leftFields.size(), rightFields.size());
}
Iterator<String> leftEnum = leftFields.iterator();
Iterator<String> rightEnum = rightFields.iterator();
while (leftEnum.hasNext()) {
String field = leftEnum.next();
assertEquals(info, field, rightEnum.next());
assertTermsEquals(info, leftReader, leftFields.terms(field), rightFields.terms(field), rarely());
}
assertFalse(rightEnum.hasNext());
}
}
private static Set<String> getDVFields(IndexReader reader) {
Set<String> fields = new HashSet<>();
for(FieldInfo fi : FieldInfos.getMergedFieldInfos(reader)) {
if (fi.getDocValuesType() != DocValuesType.NONE) {
fields.add(fi.name);
}
}
return fields;
}
/**
* checks that docvalues across all fields are equivalent
*/
public void assertDocValuesEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
Set<String> leftFields = getDVFields(leftReader);
Set<String> rightFields = getDVFields(rightReader);
assertEquals(info, leftFields, rightFields);
for (String field : leftFields) {
// TODO: clean this up... very messy
{
NumericDocValues leftValues = MultiDocValues.getNumericValues(leftReader, field);
NumericDocValues rightValues = MultiDocValues.getNumericValues(rightReader, field);
if (leftValues != null && rightValues != null) {
assertDocValuesEquals(info, leftReader.maxDoc(), leftValues, rightValues);
} else {
assertTrue(info + ": left numeric doc values for field=\"" + field + "\" are not null", leftValues == null || leftValues.nextDoc() == NO_MORE_DOCS);
assertTrue(info + ": right numeric doc values for field=\"" + field + "\" are not null", rightValues == null || rightValues.nextDoc() == NO_MORE_DOCS);
}
}
{
BinaryDocValues leftValues = MultiDocValues.getBinaryValues(leftReader, field);
BinaryDocValues rightValues = MultiDocValues.getBinaryValues(rightReader, field);
if (leftValues != null && rightValues != null) {
while (true) {
int docID = leftValues.nextDoc();
assertEquals(docID, rightValues.nextDoc());
if (docID == NO_MORE_DOCS) {
break;
}
assertEquals(leftValues.binaryValue(), rightValues.binaryValue());
}
} else {
assertTrue(info, leftValues == null || leftValues.nextDoc() == NO_MORE_DOCS);
assertTrue(info, rightValues == null || rightValues.nextDoc() == NO_MORE_DOCS);
}
}
{
SortedDocValues leftValues = MultiDocValues.getSortedValues(leftReader, field);
SortedDocValues rightValues = MultiDocValues.getSortedValues(rightReader, field);
if (leftValues != null && rightValues != null) {
// numOrds
assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
// ords
for (int i = 0; i < leftValues.getValueCount(); i++) {
final BytesRef left = BytesRef.deepCopyOf(leftValues.lookupOrd(i));
final BytesRef right = rightValues.lookupOrd(i);
assertEquals(info, left, right);
}
// bytes
for(int docID=0;docID<leftReader.maxDoc();docID++) {
assertEquals(docID, leftValues.nextDoc());
assertEquals(docID, rightValues.nextDoc());
final BytesRef left = BytesRef.deepCopyOf(leftValues.binaryValue());
final BytesRef right = rightValues.binaryValue();
assertEquals(info, left, right);
}
} else {
assertNull(info, leftValues);
assertNull(info, rightValues);
}
}
{
SortedSetDocValues leftValues = MultiDocValues.getSortedSetValues(leftReader, field);
SortedSetDocValues rightValues = MultiDocValues.getSortedSetValues(rightReader, field);
if (leftValues != null && rightValues != null) {
// numOrds
assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
// ords
for (int i = 0; i < leftValues.getValueCount(); i++) {
final BytesRef left = BytesRef.deepCopyOf(leftValues.lookupOrd(i));
final BytesRef right = rightValues.lookupOrd(i);
assertEquals(info, left, right);
}
// ord lists
while (true) {
int docID = leftValues.nextDoc();
assertEquals(docID, rightValues.nextDoc());
if (docID == NO_MORE_DOCS) {
break;
}
long ord;
while ((ord = leftValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
assertEquals(info, ord, rightValues.nextOrd());
}
assertEquals(info, SortedSetDocValues.NO_MORE_ORDS, rightValues.nextOrd());
}
} else {
assertNull(info, leftValues);
assertNull(info, rightValues);
}
}
{
SortedNumericDocValues leftValues = MultiDocValues.getSortedNumericValues(leftReader, field);
SortedNumericDocValues rightValues = MultiDocValues.getSortedNumericValues(rightReader, field);
if (leftValues != null && rightValues != null) {
while (true) {
int docID = leftValues.nextDoc();
assertEquals(docID, rightValues.nextDoc());
if (docID == NO_MORE_DOCS) {
break;
}
assertEquals(info, leftValues.docValueCount(), rightValues.docValueCount());
for (int j = 0; j < leftValues.docValueCount(); j++) {
assertEquals(info, leftValues.nextValue(), rightValues.nextValue());
}
}
} else {
assertNull(info, leftValues);
assertNull(info, rightValues);
}
}
}
}
public void assertDocValuesEquals(String info, int num, NumericDocValues leftDocValues, NumericDocValues rightDocValues) throws IOException {
assertNotNull(info, leftDocValues);
assertNotNull(info, rightDocValues);
while (true) {
int leftDocID = leftDocValues.nextDoc();
int rightDocID = rightDocValues.nextDoc();
assertEquals(leftDocID, rightDocID);
if (leftDocID == NO_MORE_DOCS) {
return;
}
assertEquals(leftDocValues.longValue(), rightDocValues.longValue());
}
}
// TODO: this is kinda stupid, we don't delete documents in the test.
public void assertDeletedDocsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
assert leftReader.numDeletedDocs() == rightReader.numDeletedDocs();
Bits leftBits = MultiBits.getLiveDocs(leftReader);
Bits rightBits = MultiBits.getLiveDocs(rightReader);
if (leftBits == null || rightBits == null) {
assertNull(info, leftBits);
assertNull(info, rightBits);
return;
}
assert leftReader.maxDoc() == rightReader.maxDoc();
assertEquals(info, leftBits.length(), rightBits.length());
for (int i = 0; i < leftReader.maxDoc(); i++) {
assertEquals(info, leftBits.get(i), rightBits.get(i));
}
}
public void assertFieldInfosEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
FieldInfos leftInfos = FieldInfos.getMergedFieldInfos(leftReader);
FieldInfos rightInfos = FieldInfos.getMergedFieldInfos(rightReader);
// TODO: would be great to verify more than just the names of the fields!
TreeSet<String> left = new TreeSet<>();
TreeSet<String> right = new TreeSet<>();
for (FieldInfo fi : leftInfos) {
left.add(fi.name);
}
for (FieldInfo fi : rightInfos) {
right.add(fi.name);
}
assertEquals(info, left, right);
}
// naive silly memory heavy uninversion!! maps docID -> packed values (a Set because a given doc can be multi-valued)
private Map<Integer,Set<BytesRef>> uninvert(String fieldName, IndexReader reader) throws IOException {
final Map<Integer,Set<BytesRef>> docValues = new HashMap<>();
for(LeafReaderContext ctx : reader.leaves()) {
PointValues points = ctx.reader().getPointValues(fieldName);
if (points == null) {
continue;
}
points.intersect(
new PointValues.IntersectVisitor() {
@Override
public void visit(int docID) {
throw new UnsupportedOperationException();
}
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
int topDocID = ctx.docBase + docID;
if (docValues.containsKey(topDocID) == false) {
docValues.put(topDocID, new HashSet<BytesRef>());
}
docValues.get(topDocID).add(new BytesRef(packedValue.clone()));
}
@Override
public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
// We pretend our query shape is so hairy that it crosses every single cell:
return PointValues.Relation.CELL_CROSSES_QUERY;
}
});
}
return docValues;
}
public void assertPointsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
FieldInfos fieldInfos1 = FieldInfos.getMergedFieldInfos(leftReader);
FieldInfos fieldInfos2 = FieldInfos.getMergedFieldInfos(rightReader);
for(FieldInfo fieldInfo1 : fieldInfos1) {
if (fieldInfo1.getPointDimensionCount() != 0) {
FieldInfo fieldInfo2 = fieldInfos2.fieldInfo(fieldInfo1.name);
// same data dimension count?
assertEquals(info, fieldInfo1.getPointDimensionCount(), fieldInfo2.getPointDimensionCount());
// same index dimension count?
assertEquals(info, fieldInfo1.getPointIndexDimensionCount(), fieldInfo2.getPointIndexDimensionCount());
// same bytes per dimension?
assertEquals(info, fieldInfo1.getPointNumBytes(), fieldInfo2.getPointNumBytes());
assertEquals(info + " field=" + fieldInfo1.name,
uninvert(fieldInfo1.name, leftReader),
uninvert(fieldInfo1.name, rightReader));
}
}
// make sure fieldInfos2 doesn't have any point fields that fieldInfos1 didn't have
for(FieldInfo fieldInfo2 : fieldInfos2) {
if (fieldInfo2.getPointDimensionCount() != 0) {
FieldInfo fieldInfo1 = fieldInfos1.fieldInfo(fieldInfo2.name);
// same data dimension count?
assertEquals(info, fieldInfo2.getPointDimensionCount(), fieldInfo1.getPointDimensionCount());
// same index dimension count?
assertEquals(info, fieldInfo2.getPointIndexDimensionCount(), fieldInfo1.getPointIndexDimensionCount());
// same bytes per dimension?
assertEquals(info, fieldInfo2.getPointNumBytes(), fieldInfo1.getPointNumBytes());
// we don't need to uninvert and compare here ... we did that in the first loop above
}
}
}
/** Inspects stack trace to figure out if a method of a specific class called us. */
public static boolean callStackContains(Class<?> clazz, String methodName) {
final String className = clazz.getName();
return StackWalker.getInstance().walk(s -> s.skip(1) // exclude this utility method
.anyMatch(f -> className.equals(f.getClassName()) && methodName.equals(f.getMethodName())));
}
/** Inspects stack trace to figure out if one of the given method names (no class restriction) called us. */
public static boolean callStackContainsAnyOf(String... methodNames) {
return StackWalker.getInstance().walk(s -> s.skip(1) // exclude this utility method
.map(StackFrame::getMethodName)
.anyMatch(Set.of(methodNames)::contains));
}
/** Inspects stack trace to figure out if the given class called us. */
public static boolean callStackContains(Class<?> clazz) {
return StackWalker.getInstance().walk(s -> s.skip(1) // exclude this utility method
.map(StackFrame::getClassName)
.anyMatch(clazz.getName()::equals));
}
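// Usage sketch for the callStackContains helpers above (illustrative only): mock directory or
// codec code can use them to detect being called from a specific place, e.g.
//
//   if (callStackContains(IndexWriter.class, "merge")) {
//     // we are being called from within IndexWriter's merge logic
//   }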
/** A runnable that can throw any checked exception. */
@FunctionalInterface
public interface ThrowingRunnable {
void run() throws Throwable;
}
/** Checks a specific exception class is thrown by the given runnable, and returns it. */
public static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
return expectThrows(expectedType, "Expected exception "+ expectedType.getSimpleName() + " but no exception was thrown", runnable);
}
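// Usage sketch for expectThrows above (illustrative only):
//
//   IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
//       () -> { throw new IllegalArgumentException("boom"); });
//   assertEquals("boom", e.getMessage());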
/** Checks a specific exception class is thrown by the given runnable, and returns it. */
public static <T extends Throwable> T expectThrows(Class<T> expectedType, String noExceptionMessage, ThrowingRunnable runnable) {
final Throwable thrown = _expectThrows(Collections.singletonList(expectedType), runnable);
if (expectedType.isInstance(thrown)) {
return expectedType.cast(thrown);
}
if (null == thrown) {
throw new AssertionFailedError(noExceptionMessage);
}
AssertionFailedError assertion = new AssertionFailedError("Unexpected exception type, expected " + expectedType.getSimpleName() + " but got " + thrown);
assertion.initCause(thrown);
throw assertion;
}
/** Checks that one of the specified exception classes is thrown by the given runnable, and returns it. */
public static <T extends Throwable> T expectThrowsAnyOf(List<Class<? extends T>> expectedTypes, ThrowingRunnable runnable) {
if (expectedTypes.isEmpty()) {
throw new AssertionError("At least one expected exception type is required?");
}
final Throwable thrown = _expectThrows(expectedTypes, runnable);
if (null != thrown) {
for (Class<? extends T> expectedType : expectedTypes) {
if (expectedType.isInstance(thrown)) {
return expectedType.cast(thrown);
}
}
}
List<String> exceptionTypes = expectedTypes.stream().map(c -> c.getSimpleName()).collect(Collectors.toList());
if (thrown != null) {
AssertionFailedError assertion = new AssertionFailedError("Unexpected exception type, expected any of " +
exceptionTypes +
" but got: " + thrown);
assertion.initCause(thrown);
throw assertion;
} else {
throw new AssertionFailedError("Expected any of the following exception types: " +
exceptionTypes+ " but no exception was thrown.");
}
}
/**
* Checks that specific wrapped and outer exception classes are thrown
* by the given runnable, and returns the wrapped exception.
*/
public static <TO extends Throwable, TW extends Throwable> TW expectThrows
(Class<TO> expectedOuterType, Class<TW> expectedWrappedType, ThrowingRunnable runnable) {
final Throwable thrown = _expectThrows(Collections.singletonList(expectedOuterType), runnable);
if (null == thrown) {
throw new AssertionFailedError("Expected outer exception " + expectedOuterType.getSimpleName()
+ " but no exception was thrown.");
}
if (expectedOuterType.isInstance(thrown)) {
Throwable cause = thrown.getCause();
if (expectedWrappedType.isInstance(cause)) {
return expectedWrappedType.cast(cause);
} else {
AssertionFailedError assertion = new AssertionFailedError
("Unexpected wrapped exception type, expected " + expectedWrappedType.getSimpleName()
+ " but got: " + cause);
assertion.initCause(thrown);
throw assertion;
}
}
AssertionFailedError assertion = new AssertionFailedError
("Unexpected outer exception type, expected " + expectedOuterType.getSimpleName()
+ " but got: " + thrown);
assertion.initCause(thrown);
throw assertion;
}
/**
* Checks that one of the specified wrapped and outer exception classes are thrown
* by the given runnable, and returns the outer exception.
*
* This method accepts outer exceptions with no wrapped exception;
* an empty list of expected wrapped exception types indicates no wrapped exception.
*/
public static <TO extends Throwable, TW extends Throwable> TO expectThrowsAnyOf
(LinkedHashMap<Class<? extends TO>,List<Class<? extends TW>>> expectedOuterToWrappedTypes, ThrowingRunnable runnable) {
final List<Class<? extends TO>> outerClasses = expectedOuterToWrappedTypes.keySet().stream().collect(Collectors.toList());
final Throwable thrown = _expectThrows(outerClasses, runnable);
if (null == thrown) {
List<String> outerTypes = outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList());
throw new AssertionFailedError("Expected any of the following outer exception types: " + outerTypes
+ " but no exception was thrown.");
}
for (Map.Entry<Class<? extends TO>, List<Class<? extends TW>>> entry : expectedOuterToWrappedTypes.entrySet()) {
Class<? extends TO> expectedOuterType = entry.getKey();
List<Class<? extends TW>> expectedWrappedTypes = entry.getValue();
Throwable cause = thrown.getCause();
if (expectedOuterType.isInstance(thrown)) {
if (expectedWrappedTypes.isEmpty()) {
return null; // no wrapped exception
} else {
for (Class<? extends TW> expectedWrappedType : expectedWrappedTypes) {
if (expectedWrappedType.isInstance(cause)) {
return expectedOuterType.cast(thrown);
}
}
List<String> wrappedTypes = expectedWrappedTypes.stream().map(Class::getSimpleName).collect(Collectors.toList());
AssertionFailedError assertion = new AssertionFailedError
("Unexpected wrapped exception type, expected one of " + wrappedTypes + " but got: " + cause);
assertion.initCause(thrown);
throw assertion;
}
}
}
List<String> outerTypes = outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList());
AssertionFailedError assertion = new AssertionFailedError
("Unexpected outer exception type, expected one of " + outerTypes + " but got: " + thrown);
assertion.initCause(thrown);
throw assertion;
}
/**
* Helper method for {@link #expectThrows} and {@link #expectThrowsAnyOf} that takes care of propagating
* any {@link AssertionError} or {@link AssumptionViolatedException} instances thrown if and only if they
* are super classes of the <code>expectedTypes</code>. Otherwise simply returns any {@link Throwable}
* thrown, regardless of type, or null if the <code>runnable</code> completed w/o error.
*/
private static Throwable _expectThrows(List<? extends Class<?>> expectedTypes, ThrowingRunnable runnable) {
try {
runnable.run();
} catch (AssertionError | AssumptionViolatedException ae) {
for (Class<?> expectedType : expectedTypes) {
if (expectedType.isInstance(ae)) { // user is expecting this type explicitly
return ae;
}
}
throw ae;
} catch (Throwable e) {
return e;
}
return null;
}
/** Returns true if the file exists (can be opened), false
* if it cannot be opened, and (unlike Java's
* File.exists) throws IOException if there's some
* unexpected error. */
public static boolean slowFileExists(Directory dir, String fileName) throws IOException {
try {
dir.openInput(fileName, IOContext.DEFAULT).close();
return true;
} catch (NoSuchFileException | FileNotFoundException e) {
return false;
}
}
/**
* This method is deprecated for a reason. Do not use it. Call {@link #createTempDir()}
* or {@link #createTempDir(String)} or {@link #createTempFile(String, String)}.
*/
@Deprecated
public static Path getBaseTempDirForTestClass() {
return tempFilesCleanupRule.getPerTestClassTempDir();
}
/**
* Creates an empty, temporary folder (when the name of the folder is of no importance).
*
* @see #createTempDir(String)
*/
public static Path createTempDir() {
return createTempDir("tempDir");
}
/**
* Creates an empty, temporary folder with the given name prefix under the
* test class's {@link #getBaseTempDirForTestClass()}.
*
* <p>The folder will be automatically removed after the
* test class completes successfully. The test should close any file handles that would prevent
* the folder from being removed.
*/
public static Path createTempDir(String prefix) {
return tempFilesCleanupRule.createTempDir(prefix);
}
/**
* Creates an empty file with the given prefix and suffix under the
* test class's {@link #getBaseTempDirForTestClass()}.
*
* <p>The file will be automatically removed after the
* test class completes successfully. The test should close any file handles that would prevent
* the folder from being removed.
*/
public static Path createTempFile(String prefix, String suffix) throws IOException {
return tempFilesCleanupRule.createTempFile(prefix, suffix);
}
/**
* Creates an empty temporary file.
*
* @see #createTempFile(String, String)
*/
public static Path createTempFile() throws IOException {
return createTempFile("tempFile", ".tmp");
}
/**
* Runs a code part with restricted permissions (be sure to add all required permissions,
* because it would start with empty permissions). You cannot grant more permissions than
* our policy file allows, but you may restrict writing to several dirs...
* <p><em>Note:</em> This assumes a {@link SecurityManager} enabled, otherwise it
* stops test execution. If enabled, it needs the following {@link SecurityPermission}:
* {@code "createAccessControlContext"}
*/
public static <T> T runWithRestrictedPermissions(PrivilegedExceptionAction<T> action, Permission... permissions) throws Exception {
assumeTrue("runWithRestrictedPermissions requires a SecurityManager enabled", System.getSecurityManager() != null);
// be sure to have required permission, otherwise doPrivileged runs with *no* permissions:
AccessController.checkPermission(new SecurityPermission("createAccessControlContext"));
final PermissionCollection perms = new Permissions();
Arrays.stream(permissions).forEach(perms::add);
final AccessControlContext ctx = new AccessControlContext(new ProtectionDomain[] { new ProtectionDomain(null, perms) });
try {
return AccessController.doPrivileged(action, ctx);
} catch (PrivilegedActionException e) {
throw e.getException();
}
}
/** True if assertions (-ea) are enabled (at least for this class). */
public static final boolean assertsAreEnabled;
static {
boolean enabled = false;
assert enabled = true; // Intentional side-effect!!!
assertsAreEnabled = enabled;
}
/**
* Compares two strings with a collator, also looking to see if the the strings
* are impacted by jdk bugs. may not avoid all jdk bugs in tests.
* see https://bugs.openjdk.java.net/browse/JDK-8071862
*/
@SuppressForbidden(reason = "dodges JDK-8071862")
public static int collate(Collator collator, String s1, String s2) {
int v1 = collator.compare(s1, s2);
int v2 = collator.getCollationKey(s1).compareTo(collator.getCollationKey(s2));
// if collation keys don't really respect collation order, things are screwed.
assumeTrue("hit JDK collator bug", Integer.signum(v1) == Integer.signum(v2));
return v1;
}
/**
* Ensures that the MergePolicy has sane values for tests that test with lots of documents.
*/
protected static IndexWriterConfig ensureSaneIWCOnNightly(IndexWriterConfig conf) {
if (LuceneTestCase.TEST_NIGHTLY) {
// newIWConfig makes smallish max seg size, which
// results in tons and tons of segments for this test
// when run nightly:
MergePolicy mp = conf.getMergePolicy();
if (mp instanceof TieredMergePolicy) {
((TieredMergePolicy) mp).setMaxMergedSegmentMB(5000.);
} else if (mp instanceof LogByteSizeMergePolicy) {
((LogByteSizeMergePolicy) mp).setMaxMergeMB(1000.);
} else if (mp instanceof LogMergePolicy) {
((LogMergePolicy) mp).setMaxMergeDocs(100000);
}
// when running nightly, merging can still have crazy parameters,
// and might use many per-field codecs. turn on CFS for IW flushes
// and ensure CFS ratio is reasonable to keep it contained.
conf.setUseCompoundFile(true);
mp.setNoCFSRatio(Math.max(0.25d, mp.getNoCFSRatio()));
}
return conf;
}
}
| 1 | 36,323 | question: would this convention automatically and always apply to all classes derived from `LuceneTestCase` including any non-`org.apache` name spaces or would it be possible to opt-out (without an exclusion list) somehow for custom code that might perhaps have chosen a different convention? | apache-lucene-solr | java |
@@ -57,7 +57,7 @@ public class PasscodeManager {
private static final String EPREFIX = "eprefix";
// Default min passcode length
- protected static final int MIN_PASSCODE_LENGTH = 6;
+ protected static final int MIN_PASSCODE_LENGTH = 4;
// Key in preference for the passcode
private static final String KEY_PASSCODE ="passcode"; | 1 | /*
* Copyright (c) 2011, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.security;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Handler;
import android.util.Log;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.app.UUIDManager;
import com.salesforce.androidsdk.ui.PasscodeActivity;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
/**
* This class manages the inactivity timeout, and keeps track of if the UI should locked etc.
*
* @author wmathurin
* @author bhariharan
*/
public class PasscodeManager {
// UUID keys
private static final String VKEY = "vkey";
private static final String VSUFFIX = "vsuffix";
private static final String VPREFIX = "vprefix";
private static final String EKEY = "ekey";
private static final String ESUFFIX = "esuffix";
private static final String EPREFIX = "eprefix";
// Default min passcode length
protected static final int MIN_PASSCODE_LENGTH = 6;
// Key in preference for the passcode
private static final String KEY_PASSCODE ="passcode";
// Private preference where we stored the passcode (hashed)
private static final String PREF_NAME = "user";
// Private preference where we stored the org settings.
private static final String MOBILE_POLICY_PREF = "mobile_policy";
// Key in preference for the access timeout.
private static final String KEY_TIMEOUT ="access_timeout";
// Key in preference for the passcode length.
private static final String KEY_PASSCODE_LENGTH ="passcode_length";
// Request code used to start passcode activity
public static final int PASSCODE_REQUEST_CODE = 777;
// this is a hash of the passcode to be used as part of the key to encrypt/decrypt oauth tokens
// It's using a different salt/key than the one used to verify the entry
private String passcodeHash;
// Misc
private HashConfig verificationHashConfig;
private HashConfig encryptionHashConfig;
private int failedPasscodeAttempts;
private Activity frontActivity;
private Handler handler;
private long lastActivity;
private boolean locked;
private int timeoutMs;
private int minPasscodeLength;
private LockChecker lockChecker;
/**
* Parameterized constructor.
*
* @param ctx Context.
* @param verificationHashConfig Verification HashConfig.
* @param encryptionHashConfig Encryption HashConfig.
*/
public PasscodeManager(Context ctx) {
this(ctx,
new HashConfig(UUIDManager.getUuId(VPREFIX), UUIDManager.getUuId(VSUFFIX), UUIDManager.getUuId(VKEY)),
new HashConfig(UUIDManager.getUuId(EPREFIX), UUIDManager.getUuId(ESUFFIX), UUIDManager.getUuId(EKEY)));
}
public PasscodeManager(Context ctx, HashConfig verificationHashConfig, HashConfig encryptionHashConfig) {
this.minPasscodeLength = MIN_PASSCODE_LENGTH;
this.lastActivity = now();
this.verificationHashConfig = verificationHashConfig;
this.encryptionHashConfig = encryptionHashConfig;
readMobilePolicy(ctx);
// Locked at app startup if you're authenticated.
this.locked = true;
lockChecker = new LockChecker();
}
/**
* Stores the mobile policy in a private file.
*
* @param context Context.
*/
private void storeMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putInt(KEY_TIMEOUT, timeoutMs);
e.putInt(KEY_PASSCODE_LENGTH, minPasscodeLength);
e.commit();
}
}
/**
* Reads the mobile policy from a private file.
*
* @param context Context.
*/
private void readMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(PasscodeManager.MOBILE_POLICY_PREF, Context.MODE_PRIVATE);
if (!sp.contains(KEY_TIMEOUT) || !sp.contains(KEY_PASSCODE_LENGTH)) {
timeoutMs = 0;
minPasscodeLength = MIN_PASSCODE_LENGTH;
storeMobilePolicy(context);
return;
}
timeoutMs = sp.getInt(PasscodeManager.KEY_TIMEOUT, 0);
minPasscodeLength = sp.getInt(PasscodeManager.KEY_PASSCODE_LENGTH, MIN_PASSCODE_LENGTH);
}
}
/**
* Reset this passcode manager: delete stored passcode and reset fields to their starting value
*/
public void reset(Context ctx) {
lastActivity = now();
locked = true;
failedPasscodeAttempts = 0;
passcodeHash = null;
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.remove(KEY_PASSCODE);
e.commit();
timeoutMs = 0;
minPasscodeLength = MIN_PASSCODE_LENGTH;
storeMobilePolicy(ctx);
handler = null;
}
/**
* Enable/disable passcode screen.
*/
public void setEnabled(boolean enabled) {
if (enabled) {
handler = new Handler();
handler.postDelayed(lockChecker, 20 * 1000);
} else {
if (handler != null) {
handler.removeCallbacks(lockChecker);
}
handler = null;
}
}
/**
* @return true if passcode manager is enabled.
*/
public boolean isEnabled() {
return (handler != null);
}
/**
* @return the new failure count
*/
public int addFailedPasscodeAttempt() {
return ++failedPasscodeAttempts;
}
/**
* @param ctx
* @param passcode
* @return true if passcode matches the one stored (hashed) in private preference
*/
public boolean check(Context ctx, String passcode) {
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
String hashedPasscode = sp.getString(KEY_PASSCODE, null);
hashedPasscode = Encryptor.removeNewLine(hashedPasscode);
if (hashedPasscode != null) {
return hashedPasscode.equals(hashForVerification(passcode));
}
/*
* If the stored passcode hash is null, there is no passcode.
*/
return true;
}
/**
* Store the given passcode (hashed) in private preference
* @param ctx
* @param passcode
*/
public void store(Context ctx, String passcode) {
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putString(KEY_PASSCODE, hashForVerification(passcode));
e.commit();
}
/**
* @param ctx
* @return true if passcode was already created
*/
public boolean hasStoredPasscode(Context ctx) {
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
return sp.contains(KEY_PASSCODE);
}
/**
* @return number of failed passcode attempts
*/
public int getFailedPasscodeAttempts() {
return failedPasscodeAttempts;
}
/**
* @return a hash of the passcode that can be used for encrypting oauth tokens
*/
public String getPasscodeHash() {
return passcodeHash;
}
/**
* @return true if locked
*/
public boolean isLocked() {
return timeoutMs > 0 && locked;
}
/**
* @param ctx
*/
public void lock(Context ctx) {
locked = true;
showLockActivity(ctx);
EventsObservable.get().notifyEvent(EventType.AppLocked);
}
/**
* @param newFrontActivity
* @param registerActivity
* @return
*/
public boolean lockIfNeeded(Activity newFrontActivity, boolean registerActivity) {
if (newFrontActivity != null)
frontActivity = newFrontActivity;
if (isEnabled() && (isLocked() || shouldLock())) {
lock(frontActivity);
return true;
} else {
if (registerActivity) updateLast();
return false;
}
}
/**
* @param a
*/
public void nolongerFrontActivity(Activity a) {
if (frontActivity == a)
frontActivity = null;
}
/**
* To be called by passcode protected activity when being paused
*/
public void onPause(Activity ctx) {
// Disable passcode manager
setEnabled(false);
}
/**
* To be called by passcode protected activity when being resumed
* When passcode screen is about to be shown, false is returned, the activity will be resumed once
* the user has successfully enter her passcode
*
* @return true if the resume should be allowed to continue and false otherwise
*/
public boolean onResume(Activity ctx) {
// Enable passcode manager
setEnabled(true);
// Bring up passcode screen if needed
lockIfNeeded(ctx, true);
// If locked, do nothing - when the app gets unlocked we will be back here
return !isLocked();
}
/**
* To be called by passcode protected activity whenever there is a user interaction
*/
public void recordUserInteraction() {
updateLast();
}
/**
* Called when the access timeout for the org changes.
*
* @param newTimeout New access timeout value.
*/
public void setTimeoutMs(int newTimeout) {
// Access timeout hasn't changed.
if (timeoutMs == newTimeout) {
return;
}
/*
* Either access timeout has changed from one non-zero value to another,
* which doesn't alter the passcode situation, or the app goes from
* no passcode to passcode, which will trigger the passcode creation flow.
*/
if (timeoutMs == 0 || (timeoutMs > 0 && newTimeout > 0)) {
timeoutMs = newTimeout;
storeMobilePolicy(SalesforceSDKManager.getInstance().getAppContext());
return;
}
// Passcode to no passcode.
timeoutMs = newTimeout;
SalesforceSDKManager.getInstance().changePasscode(passcodeHash, null);
reset(SalesforceSDKManager.getInstance().getAppContext());
}
public int getTimeoutMs() {
return timeoutMs;
}
public int getMinPasscodeLength() {
return minPasscodeLength;
}
public void setMinPasscodeLength(int minPasscodeLength) {
this.minPasscodeLength = minPasscodeLength;
}
public boolean shouldLock() {
return timeoutMs > 0 && now() >= (lastActivity + timeoutMs);
}
public void showLockActivity(Context ctx) {
if (ctx == null) {
return;
}
Intent i = new Intent(ctx, PasscodeActivity.class);
i.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
i.addFlags(Intent.FLAG_ACTIVITY_NO_HISTORY);
i.addFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT);
if (ctx == SalesforceSDKManager.getInstance().getAppContext()) {
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
if (ctx instanceof Activity) {
((Activity) ctx).startActivityForResult(i, PASSCODE_REQUEST_CODE);
} else {
ctx.startActivity(i);
}
}
public void unlock(String passcode) {
locked = false;
failedPasscodeAttempts = 0;
passcodeHash = hashForEncryption(passcode);
updateLast();
EventsObservable.get().notifyEvent(EventType.AppUnlocked);
}
protected long now() {
return System.currentTimeMillis();
}
private void updateLast() {
lastActivity = now();
}
public String hashForVerification(String passcode) {
return hash(passcode, verificationHashConfig);
}
public String hashForEncryption(String passcode) {
return hash(passcode, encryptionHashConfig);
}
private String hash(String passcode, HashConfig hashConfig) {
return Encryptor.hash(hashConfig.prefix + passcode + hashConfig.suffix, hashConfig.key);
}
/**
* Thread checking periodically to see how much has elapsed since the last recorded activity
* When that elapsed time exceed timeoutMs, it locks the app
*/
private class LockChecker implements Runnable {
public void run() {
try {
if (isEnabled()) {
Log.d("LockChecker:run", "isLocked:" + locked + " elapsedSinceLastActivity:" + ((now() - lastActivity)/1000) + " timeout:" + (timeoutMs / 1000));
}
if (!locked)
lockIfNeeded(null, false);
} finally {
if (handler != null) {
handler.postDelayed(this, 20 * 1000);
}
}
}
}
/**
* Key for hashing and salts to be preprended and appended to data to increase entropy.
*/
public static class HashConfig {
public final String prefix;
public final String suffix;
public final String key;
public HashConfig(String prefix, String suffix, String key) {
this.prefix = prefix;
this.suffix = suffix;
this.key = key;
}
}
}
| 1 | 13,766 | Also found this bug, where we were setting the min passcode length to 6, but the min length for a connected app is 4. This can cause problems when the app is force closed or the shared pref is removed. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -175,7 +175,7 @@ class FastTemporalMemory(TemporalMemory):
"""
self._validateCell(cell)
- return int(cell.idx / self.cellsPerColumn)
+ return int(cell / self.cellsPerColumn)
def cellsForColumn(self, column): | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal Memory implementation in Python.
"""
from nupic.research.temporal_memory import TemporalMemory
from nupic.bindings.algorithms import Connections, ConnectionsCell
class FastTemporalMemory(TemporalMemory):
"""
Class implementing the Temporal Memory algorithm.
Uses C++ Connections data structure for optimization.
"""
def __init__(self, *args, **kwargs):
maxSegmentsPerCell = kwargs.get("maxSegmentsPerCell", 255)
maxSynapsesPerSegment = kwargs.get("maxSynapsesPerSegment", 255)
super(FastTemporalMemory, self).__init__(*args, **kwargs)
self.connections = Connections(
self.numberOfCells(),
maxSegmentsPerCell=maxSegmentsPerCell,
maxSynapsesPerSegment=maxSynapsesPerSegment)
def burstColumns(self,
activeColumns,
predictedColumns,
prevActiveCells,
prevWinnerCells,
connections):
"""
Phase 2: Burst unpredicted columns.
Pseudocode:
- for each unpredicted active column
- mark all cells as active
- mark the best matching cell as winner cell
- (learning)
- if it has no matching segment
- (optimization) if there are prev winner cells
- add a segment to it
- mark the segment as learning
@param activeColumns (set) Indices of active columns in `t`
@param predictedColumns (set) Indices of predicted columns in `t`
@param prevActiveCells (set) Indices of active cells in `t-1`
@param prevWinnerCells (set) Indices of winner cells in `t-1`
@param connections (Connections) Connectivity of layer
@return (tuple) Contains:
`activeCells` (set),
`winnerCells` (set),
`learningSegments` (set)
"""
activeCells = set()
winnerCells = set()
learningSegments = set()
unpredictedColumns = activeColumns - predictedColumns
for column in unpredictedColumns:
cells = self.cellsForColumn(column)
activeCells.update(cells)
bestSegment = connections.mostActiveSegmentForCells(
list(cells), list(prevActiveCells), self.minThreshold)
if bestSegment is None:
bestCell = self.leastUsedCell(cells, connections)
if len(prevWinnerCells):
bestSegment = connections.createSegment(bestCell)
else:
# TODO: For some reason, bestSegment.cell is garbage-collected after
# this function returns. So we have to use the below hack. Figure out
# why and clean up.
bestCell = ConnectionsCell(bestSegment.cell.idx)
winnerCells.add(bestCell)
if bestSegment:
learningSegments.add(bestSegment)
return activeCells, winnerCells, learningSegments
def computePredictiveCells(self, activeCells, connections):
"""
Phase 4: Compute predictive cells due to lateral input
on distal dendrites.
Pseudocode:
- for each distal dendrite segment with activity >= activationThreshold
- mark the segment as active
- mark the cell as predictive
- for each distal dendrite segment with unconnected
activity >= minThreshold
- mark the segment as matching
- mark the cell as matching
Forward propagates activity from active cells to the synapses that touch
them, to determine which synapses are active.
@param activeCells (set) Indices of active cells in `t`
@param connections (Connections) Connectivity of layer
@return (tuple) Contains:
`activeSegments` (set),
`predictiveCells` (set),
`matchingSegments` (set),
`matchingCells` (set)
"""
activity = connections.computeActivity(list(activeCells),
self.connectedPermanence,
self.activationThreshold)
activeSegments = set(connections.activeSegments(activity))
predictiveCells = set(connections.activeCells(activity))
if self.predictedSegmentDecrement > 0:
activity = connections.computeActivity(list(activeCells),
0,
self.minThreshold)
matchingSegments = set(connections.activeSegments(activity))
matchingCells = set(connections.activeCells(activity))
else:
matchingSegments = set()
matchingCells = set()
return activeSegments, predictiveCells, matchingSegments, matchingCells
@staticmethod
def getCellIndex(cell):
return cell.idx
# ==============================
# Helper functions
# ==============================
def columnForCell(self, cell):
"""
Returns the index of the column that a cell belongs to.
@param cell (int) Cell index
@return (int) Column index
"""
self._validateCell(cell)
return int(cell.idx / self.cellsPerColumn)
def cellsForColumn(self, column):
"""
Returns the indices of cells that belong to a column.
@param column (int) Column index
@return (set) Cell indices
"""
self._validateColumn(column)
start = self.cellsPerColumn * column
end = start + self.cellsPerColumn
return set([ConnectionsCell(idx) for idx in xrange(start, end)])
def _validateCell(self, cell):
"""
Raises an error if cell index is invalid.
@param cell (int) Cell index
"""
if cell.idx >= self.numberOfCells() or cell.idx < 0:
raise IndexError("Invalid cell")
| 1 | 20,522 | Why was this change necessary? | numenta-nupic | py |
@@ -55,7 +55,7 @@ import java.util.logging.Logger;
class Host {
- private static final Logger LOG = Logger.getLogger("Selenium Distributor");
+ private static final Logger LOG = Logger.getLogger("Selenium Host");
private final Node node;
private final UUID nodeId;
private final URI uri; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.distributor.local;
import static java.util.stream.Collectors.groupingBy;
import static java.util.stream.Collectors.summingInt;
import static org.openqa.selenium.grid.data.SessionClosedEvent.SESSION_CLOSED;
import static org.openqa.selenium.grid.distributor.local.Host.Status.DOWN;
import static org.openqa.selenium.grid.distributor.local.Host.Status.DRAINING;
import static org.openqa.selenium.grid.distributor.local.Host.Status.UP;
import static org.openqa.selenium.grid.distributor.local.Slot.Status.ACTIVE;
import static org.openqa.selenium.grid.distributor.local.Slot.Status.AVAILABLE;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.grid.component.HealthCheck;
import org.openqa.selenium.grid.data.CreateSessionRequest;
import org.openqa.selenium.grid.data.CreateSessionResponse;
import org.openqa.selenium.grid.data.DistributorStatus;
import org.openqa.selenium.grid.data.NodeStatus;
import org.openqa.selenium.grid.node.Node;
import org.openqa.selenium.remote.SessionId;
import java.net.URI;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;
import java.util.logging.Logger;
class Host {
private static final Logger LOG = Logger.getLogger("Selenium Distributor");
private final Node node;
private final UUID nodeId;
private final URI uri;
private final Runnable performHealthCheck;
// Used any time we need to read or modify the mutable state of this host
private final ReadWriteLock lock = new ReentrantReadWriteLock(/* fair */ true);
private Status status;
private List<Slot> slots;
private int maxSessionCount;
public Host(EventBus bus, Node node) {
this.node = Objects.requireNonNull(node);
this.nodeId = node.getId();
this.uri = node.getUri();
this.status = Status.DOWN;
this.slots = ImmutableList.of();
HealthCheck healthCheck = node.getHealthCheck();
this.performHealthCheck = () -> {
HealthCheck.Result result = healthCheck.check();
Host.Status current = result.isAlive() ? UP : DOWN;
Host.Status previous = setHostStatus(current);
if (previous == DRAINING) {
// We want to continue to allow the node to drain.
setHostStatus(DRAINING);
return;
}
if (current != previous) {
LOG.info(String.format(
"Changing status of node %s from %s to %s. Reason: %s",
node.getId(),
previous,
current,
result.getMessage()));
}
};
bus.addListener(SESSION_CLOSED, event -> {
SessionId id = event.getData(SessionId.class);
this.slots.forEach(slot -> slot.onEnd(id));
});
update(node.getStatus());
}
void update(NodeStatus status) {
Objects.requireNonNull(status);
Lock writeLock = lock.writeLock();
writeLock.lock();
try {
// This is grossly inefficient. But we're on a modern processor and we're expecting 10s to 100s
// of nodes, so this is probably ok.
Set<NodeStatus.Active> sessions = status.getCurrentSessions();
Map<Capabilities, Integer> actives = sessions.parallelStream().collect(
groupingBy(NodeStatus.Active::getStereotype, summingInt(active -> 1)));
ImmutableList.Builder<Slot> slots = ImmutableList.builder();
status.getStereotypes().forEach((caps, count) -> {
if (actives.containsKey(caps)) {
Integer activeCount = actives.get(caps);
for (int i = 0; i < activeCount; i++) {
slots.add(new Slot(node, caps, ACTIVE));
}
count -= activeCount;
}
for (int i = 0; i < count; i++) {
slots.add(new Slot(node, caps, AVAILABLE));
}
});
this.slots = slots.build();
// By definition, we can never have more sessions than we have slots available
this.maxSessionCount = Math.min(this.slots.size(), status.getMaxSessionCount());
} finally {
writeLock.unlock();
}
}
public UUID getId() {
return nodeId;
}
public DistributorStatus.NodeSummary asSummary() {
Map<Capabilities, Integer> stereotypes = new HashMap<>();
Map<Capabilities, Integer> used = new HashMap<>();
slots.forEach(slot -> {
stereotypes.compute(slot.getStereotype(), (key, curr) -> curr == null ? 1 : curr + 1);
if (slot.getStatus() != AVAILABLE) {
used.compute(slot.getStereotype(), (key, curr) -> curr == null ? 1 : curr + 1);
}
});
return new DistributorStatus.NodeSummary(
nodeId,
uri,
getHostStatus() == UP,
maxSessionCount,
stereotypes,
used);
}
public Status getHostStatus() {
return status;
}
/**
* @return The previous status of the node.
*/
private Status setHostStatus(Status status) {
Status toReturn = this.status;
this.status = Objects.requireNonNull(status, "Status must be set.");
return toReturn;
}
public boolean hasCapacity(Capabilities caps) {
Lock read = lock.readLock();
read.lock();
try {
long count = slots.stream()
.filter(slot -> slot.isSupporting(caps))
.filter(slot -> slot.getStatus() == AVAILABLE)
.count();
return count > 0;
} finally {
read.unlock();
}
}
public float getLoad() {
Lock read = lock.readLock();
read.lock();
try {
float inUse = slots.parallelStream()
.filter(slot -> slot.getStatus() != AVAILABLE)
.count();
return (inUse / (float) maxSessionCount) * 100f;
} finally {
read.unlock();
}
}
public long getLastSessionCreated() {
Lock read = lock.readLock();
read.lock();
try {
return slots.parallelStream()
.mapToLong(Slot::getLastSessionCreated)
.max()
.orElse(0);
} finally {
read.unlock();
}
}
public Supplier<CreateSessionResponse> reserve(CreateSessionRequest sessionRequest) {
Objects.requireNonNull(sessionRequest);
Lock write = lock.writeLock();
write.lock();
try {
Slot toReturn = slots.stream()
.filter(slot -> slot.isSupporting(sessionRequest.getCapabilities()))
.filter(slot -> slot.getStatus() == AVAILABLE)
.findFirst()
.orElseThrow(() -> new SessionNotCreatedException("Unable to reserve an instance"));
return toReturn.onReserve(sessionRequest);
} finally {
write.unlock();
}
}
@VisibleForTesting
void runHealthCheck() {
performHealthCheck.run();
}
@Override
public int hashCode() {
return Objects.hash(nodeId, uri);
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Host)) {
return false;
}
Host that = (Host) obj;
return this.node.equals(that.node);
}
public enum Status {
UP,
DRAINING,
DOWN,
}
}
| 1 | 16,860 | I kind of did this deliberately, so everything from the distributor appears in the same set of logs. | SeleniumHQ-selenium | java |
@@ -300,6 +300,7 @@ class TCPSession(IPSession):
return pkt
metadata["pay_class"] = pay_class
metadata["tcp_reassemble"] = tcp_reassemble
+ metadata["seq"] = pkt[TCP].seq
else:
tcp_reassemble = metadata["tcp_reassemble"]
# Get a relative sequence number for a storage purpose | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Sessions: decode flow of packets when sniffing
"""
from collections import defaultdict
from scapy.compat import raw
from scapy.config import conf
from scapy.packet import NoPayload, Packet
from scapy.plist import PacketList
# Typing imports
from scapy.compat import (
Any,
Callable,
DefaultDict,
Dict,
List,
Optional,
Tuple,
cast
)
class DefaultSession(object):
"""Default session: no stream decoding"""
def __init__(
self,
prn=None, # type: Optional[Callable[[Packet], Any]]
store=False, # type: bool
supersession=None, # type: Optional[DefaultSession]
*args, # type: Any
**karg # type: Any
):
# type: (...) -> None
self.__prn = prn
self.__store = store
self.lst = [] # type: List[Packet]
self.__count = 0
self._supersession = supersession
if self._supersession:
self._supersession.prn = self.__prn
self._supersession.store = self.__store
self.__store = False
self.__prn = None
@property
def store(self):
# type: () -> bool
return self.__store
@store.setter
def store(self, val):
# type: (bool) -> None
if self._supersession:
self._supersession.store = val
else:
self.__store = val
@property
def prn(self):
# type: () -> Optional[Callable[[Packet], Any]]
return self.__prn
@prn.setter
def prn(self, f):
# type: (Optional[Any]) -> None
if self._supersession:
self._supersession.prn = f
else:
self.__prn = f
@property
def count(self):
# type: () -> int
if self._supersession:
return self._supersession.count
else:
return self.__count
def toPacketList(self):
# type: () -> PacketList
if self._supersession:
return PacketList(self._supersession.lst, "Sniffed")
else:
return PacketList(self.lst, "Sniffed")
def on_packet_received(self, pkt):
# type: (Optional[Packet]) -> None
"""DEV: entry point. Will be called by sniff() for each
received packet (that passes the filters).
"""
if not pkt:
return
if not isinstance(pkt, Packet):
raise TypeError("Only provide a Packet.")
self.__count += 1
if self.store:
self.lst.append(pkt)
if self.prn:
result = self.prn(pkt)
if result is not None:
print(result)
class IPSession(DefaultSession):
"""Defragment IP packets 'on-the-flow'.
Usage:
>>> sniff(session=IPSession)
"""
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
DefaultSession.__init__(self, *args, **kwargs)
self.fragments = defaultdict(list) # type: DefaultDict[Tuple[Any, ...], List[Packet]] # noqa: E501
def _ip_process_packet(self, packet):
# type: (Packet) -> Optional[Packet]
from scapy.layers.inet import _defrag_list, IP
if IP not in packet:
return packet
ip = packet[IP]
packet._defrag_pos = 0
if ip.frag != 0 or ip.flags.MF:
uniq = (ip.id, ip.src, ip.dst, ip.proto)
self.fragments[uniq].append(packet)
if not ip.flags.MF: # end of frag
try:
if self.fragments[uniq][0].frag == 0:
# Has first fragment (otherwise ignore)
defrag = [] # type: List[Packet]
_defrag_list(self.fragments[uniq], defrag, [])
defragmented_packet = defrag[0]
defragmented_packet = defragmented_packet.__class__(
raw(defragmented_packet)
)
return defragmented_packet
finally:
del self.fragments[uniq]
return None
else:
return packet
def on_packet_received(self, pkt):
# type: (Optional[Packet]) -> None
if not pkt:
return None
super(IPSession, self).on_packet_received(self._ip_process_packet(pkt))
class StringBuffer(object):
"""StringBuffer is an object used to re-order data received during
a TCP transmission.
Each TCP fragment contains a sequence number, which marks
(relatively to the first sequence number) the index of the data contained
in the fragment.
If a TCP fragment is missed, this class will fill the missing space with
zeros.
"""
def __init__(self):
# type: () -> None
self.content = bytearray(b"")
self.content_len = 0
self.incomplete = [] # type: List[Tuple[int, int]]
def append(self, data, seq):
# type: (bytes, int) -> None
data_len = len(data)
seq = seq - 1
if seq + data_len > self.content_len:
self.content += b"\x00" * (seq - self.content_len + data_len)
# If data was missing, mark it.
self.incomplete.append((self.content_len, seq))
self.content_len = seq + data_len
assert len(self.content) == self.content_len
# XXX removes empty space marker.
# for ifrag in self.incomplete:
# if [???]:
# self.incomplete.remove([???])
memoryview(self.content)[seq:seq + data_len] = data # type: ignore
def full(self):
# type: () -> bool
# Should only be true when all missing data was filled up,
# (or there never was missing data)
return True # XXX
def clear(self):
# type: () -> None
self.__init__() # type: ignore
def __bool__(self):
# type: () -> bool
return bool(self.content_len)
__nonzero__ = __bool__
def __len__(self):
# type: () -> int
return self.content_len
def __bytes__(self):
# type: () -> bytes
return bytes(self.content)
def __str__(self):
# type: () -> str
return cast(str, self.__bytes__())
class TCPSession(IPSession):
"""A Session that matches seq/ack packets together to dissect
special protocols, such as HTTP.
DEV: implement a class-function `tcp_reassemble` in your Packet class::
@classmethod
def tcp_reassemble(cls, data, metadata):
# data = the reassembled data from the same request/flow
# metadata = empty dictionary, that can be used to store data
[...]
# If the packet is available, return it. Otherwise don't.
# Whenever you return a packet, the buffer will be discarded.
return pkt
# Otherwise, maybe store stuff in metadata, and return None,
# as you need additional data.
return None
For more details and a real example, see:
https://scapy.readthedocs.io/en/latest/usage.html#how-to-use-tcpsession-to-defragment-tcp-packets
:param app: Whether the socket is on application layer = has no TCP
layer. This is used for instance if you are using a native
TCP socket. Default to False
"""
fmt = ('TCP {IP:%IP.src%}{IPv6:%IPv6.src%}:%r,TCP.sport% > ' +
'{IP:%IP.dst%}{IPv6:%IPv6.dst%}:%r,TCP.dport%')
def __init__(self, app=False, *args, **kwargs):
# type: (bool, *Any, **Any) -> None
super(TCPSession, self).__init__(*args, **kwargs)
self.app = app
if app:
self.data = b""
self.metadata = {} # type: Dict[str, Any]
else:
# The StringBuffer() is used to build a global
# string from fragments and their seq nulber
self.tcp_frags = defaultdict(
lambda: (StringBuffer(), {})
) # type: DefaultDict[str, Tuple[StringBuffer, Dict[str, Any]]]
def _process_packet(self, pkt):
# type: (Packet) -> Optional[Packet]
"""Process each packet: matches the TCP seq/ack numbers
to follow the TCP streams, and orders the fragments.
"""
if self.app:
# Special mode: Application layer. Use on top of TCP
pay_class = pkt.__class__
if not hasattr(pay_class, "tcp_reassemble"):
# Being on top of TCP, we have no way of knowing
# when a packet ends.
return pkt
self.data += bytes(pkt)
pkt = pay_class.tcp_reassemble(self.data, self.metadata)
if pkt:
self.data = b""
self.metadata = {}
return pkt
return None
from scapy.layers.inet import IP, TCP
if not pkt or TCP not in pkt:
return pkt
pay = pkt[TCP].payload
if isinstance(pay, (NoPayload, conf.padding_layer)):
return pkt
new_data = pay.original
# Match packets by a uniqute TCP identifier
seq = pkt[TCP].seq
ident = pkt.sprintf(self.fmt)
data, metadata = self.tcp_frags[ident]
# Let's guess which class is going to be used
if "pay_class" not in metadata:
pay_class = pay.__class__
if hasattr(pay_class, "tcp_reassemble"):
tcp_reassemble = pay_class.tcp_reassemble
else:
# We can't know for sure when a packet ends.
# Ignore.
return pkt
metadata["pay_class"] = pay_class
metadata["tcp_reassemble"] = tcp_reassemble
else:
tcp_reassemble = metadata["tcp_reassemble"]
# Get a relative sequence number for a storage purpose
relative_seq = metadata.get("relative_seq", None)
if relative_seq is None:
relative_seq = metadata["relative_seq"] = seq - 1
seq = seq - relative_seq
# Add the data to the buffer
# Note that this take care of retransmission packets.
data.append(new_data, seq)
# Check TCP FIN or TCP RESET
if pkt[TCP].flags.F or pkt[TCP].flags.R:
metadata["tcp_end"] = True
# In case any app layer protocol requires it,
# allow the parser to inspect TCP PSH flag
if pkt[TCP].flags.P:
metadata["tcp_psh"] = True
# XXX TODO: check that no empty space is missing in the buffer.
# XXX Currently, if a TCP fragment was missing, we won't notice it.
packet = None # type: Optional[Packet]
if data.full():
# Reassemble using all previous packets
packet = tcp_reassemble(bytes(data), metadata)
# Stack the result on top of the previous frames
if packet:
data.clear()
metadata.clear()
del self.tcp_frags[ident]
pay.underlayer.remove_payload()
if IP in pkt:
pkt[IP].len = None
pkt[IP].chksum = None
return pkt / packet
return None
def on_packet_received(self, pkt):
# type: (Optional[Packet]) -> None
"""Hook to the Sessions API: entry point of the dissection.
This will defragment IP if necessary, then process to
TCP reassembly.
"""
if not pkt:
return None
# First, defragment IP if necessary
pkt = self._ip_process_packet(pkt)
if not pkt:
return None
# Now handle TCP reassembly
pkt = self._process_packet(pkt)
DefaultSession.on_packet_received(self, pkt)
| 1 | 19,533 | We already have a `seq = pkt[TCP].seq` so you can just re-use it. | secdev-scapy | py |
@@ -22,11 +22,8 @@ package org.apache.iceberg.data.parquet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import java.time.Instant;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
-import java.time.OffsetDateTime;
-import java.time.ZoneOffset;
+
+import java.time.*;
import java.time.temporal.ChronoUnit;
import java.util.Iterator;
import java.util.List; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data.parquet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.parquet.ParquetSchemaUtil;
import org.apache.iceberg.parquet.ParquetValueReader;
import org.apache.iceberg.parquet.ParquetValueReaders;
import org.apache.iceberg.parquet.ParquetValueReaders.BinaryAsDecimalReader;
import org.apache.iceberg.parquet.ParquetValueReaders.BytesReader;
import org.apache.iceberg.parquet.ParquetValueReaders.IntAsLongReader;
import org.apache.iceberg.parquet.ParquetValueReaders.IntegerAsDecimalReader;
import org.apache.iceberg.parquet.ParquetValueReaders.ListReader;
import org.apache.iceberg.parquet.ParquetValueReaders.LongAsDecimalReader;
import org.apache.iceberg.parquet.ParquetValueReaders.MapReader;
import org.apache.iceberg.parquet.ParquetValueReaders.PrimitiveReader;
import org.apache.iceberg.parquet.ParquetValueReaders.StringReader;
import org.apache.iceberg.parquet.ParquetValueReaders.StructReader;
import org.apache.iceberg.parquet.ParquetValueReaders.UnboxedReader;
import org.apache.iceberg.parquet.TypeWithSchemaVisitor;
import org.apache.iceberg.types.Type.TypeID;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.StructType;
import org.apache.iceberg.types.Types.TimestampType;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
public class GenericParquetReaders {
private GenericParquetReaders() {
}
@SuppressWarnings("unchecked")
public static ParquetValueReader<GenericRecord> buildReader(Schema expectedSchema,
MessageType fileSchema) {
if (ParquetSchemaUtil.hasIds(fileSchema)) {
return (ParquetValueReader<GenericRecord>)
TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
new ReadBuilder(fileSchema));
} else {
return (ParquetValueReader<GenericRecord>)
TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
new FallbackReadBuilder(fileSchema));
}
}
private static class FallbackReadBuilder extends ReadBuilder {
FallbackReadBuilder(MessageType type) {
super(type);
}
@Override
public ParquetValueReader<?> message(StructType expected, MessageType message,
List<ParquetValueReader<?>> fieldReaders) {
// the top level matches by ID, but the remaining IDs are missing
return super.struct(expected, message, fieldReaders);
}
@Override
public ParquetValueReader<?> struct(StructType expected, GroupType struct,
List<ParquetValueReader<?>> fieldReaders) {
// the expected struct is ignored because nested fields are never found when the
List<ParquetValueReader<?>> newFields = Lists.newArrayListWithExpectedSize(
fieldReaders.size());
List<Type> types = Lists.newArrayListWithExpectedSize(fieldReaders.size());
List<Type> fields = struct.getFields();
for (int i = 0; i < fields.size(); i += 1) {
Type fieldType = fields.get(i);
int fieldD = type().getMaxDefinitionLevel(path(fieldType.getName())) - 1;
newFields.add(ParquetValueReaders.option(fieldType, fieldD, fieldReaders.get(i)));
types.add(fieldType);
}
return new RecordReader(types, newFields, expected);
}
}
private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
private final MessageType type;
ReadBuilder(MessageType type) {
this.type = type;
}
@Override
public ParquetValueReader<?> message(StructType expected, MessageType message,
List<ParquetValueReader<?>> fieldReaders) {
return struct(expected, message.asGroupType(), fieldReaders);
}
@Override
public ParquetValueReader<?> struct(StructType expected, GroupType struct,
List<ParquetValueReader<?>> fieldReaders) {
// match the expected struct's order
Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
Map<Integer, Type> typesById = Maps.newHashMap();
List<Type> fields = struct.getFields();
for (int i = 0; i < fields.size(); i += 1) {
Type fieldType = fields.get(i);
int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
int id = fieldType.getId().intValue();
readersById.put(id, ParquetValueReaders.option(fieldType, fieldD, fieldReaders.get(i)));
typesById.put(id, fieldType);
}
List<Types.NestedField> expectedFields = expected != null ?
expected.fields() : ImmutableList.of();
List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize(
expectedFields.size());
List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
for (Types.NestedField field : expectedFields) {
int id = field.fieldId();
ParquetValueReader<?> reader = readersById.get(id);
if (reader != null) {
reorderedFields.add(reader);
types.add(typesById.get(id));
} else {
reorderedFields.add(ParquetValueReaders.nulls());
types.add(null);
}
}
return new RecordReader(types, reorderedFields, expected);
}
@Override
public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array,
ParquetValueReader<?> elementReader) {
GroupType repeated = array.getFields().get(0).asGroupType();
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
Type elementType = repeated.getType(0);
int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;
return new ListReader<>(repeatedD, repeatedR, ParquetValueReaders.option(elementType, elementD, elementReader));
}
@Override
public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map,
ParquetValueReader<?> keyReader,
ParquetValueReader<?> valueReader) {
GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
Type keyType = repeatedKeyValue.getType(0);
int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
Type valueType = repeatedKeyValue.getType(1);
int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;
return new MapReader<>(repeatedD, repeatedR,
ParquetValueReaders.option(keyType, keyD, keyReader),
ParquetValueReaders.option(valueType, valueD, valueReader));
}
@Override
public ParquetValueReader<?> primitive(org.apache.iceberg.types.Type.PrimitiveType expected,
PrimitiveType primitive) {
ColumnDescriptor desc = type.getColumnDescription(currentPath());
if (primitive.getOriginalType() != null) {
switch (primitive.getOriginalType()) {
case ENUM:
case JSON:
case UTF8:
return new StringReader(desc);
case INT_8:
case INT_16:
case INT_32:
if (expected.typeId() == TypeID.LONG) {
return new IntAsLongReader(desc);
} else {
return new UnboxedReader<>(desc);
}
case INT_64:
return new UnboxedReader<>(desc);
case DATE:
return new DateReader(desc);
case TIMESTAMP_MICROS:
TimestampType tsMicrosType = (TimestampType) expected;
if (tsMicrosType.shouldAdjustToUTC()) {
return new TimestamptzReader(desc);
} else {
return new TimestampReader(desc);
}
case TIMESTAMP_MILLIS:
TimestampType tsMillisType = (TimestampType) expected;
if (tsMillisType.shouldAdjustToUTC()) {
return new TimestamptzMillisReader(desc);
} else {
return new TimestampMillisReader(desc);
}
case DECIMAL:
DecimalMetadata decimal = primitive.getDecimalMetadata();
switch (primitive.getPrimitiveTypeName()) {
case BINARY:
case FIXED_LEN_BYTE_ARRAY:
return new BinaryAsDecimalReader(desc, decimal.getScale());
case INT64:
return new LongAsDecimalReader(desc, decimal.getScale());
case INT32:
return new IntegerAsDecimalReader(desc, decimal.getScale());
default:
throw new UnsupportedOperationException(
"Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
}
case BSON:
return new BytesReader(desc);
default:
throw new UnsupportedOperationException(
"Unsupported logical type: " + primitive.getOriginalType());
}
}
switch (primitive.getPrimitiveTypeName()) {
case FIXED_LEN_BYTE_ARRAY:
return new FixedReader(desc);
case BINARY:
return new BytesReader(desc);
case INT32:
if (expected != null && expected.typeId() == TypeID.LONG) {
return new IntAsLongReader(desc);
} else {
return new UnboxedReader<>(desc);
}
case FLOAT:
if (expected != null && expected.typeId() == TypeID.DOUBLE) {
return new ParquetValueReaders.FloatAsDoubleReader(desc);
} else {
return new UnboxedReader<>(desc);
}
case BOOLEAN:
case INT64:
case DOUBLE:
return new UnboxedReader<>(desc);
default:
throw new UnsupportedOperationException("Unsupported type: " + primitive);
}
}
MessageType type() {
return type;
}
private String[] currentPath() {
String[] path = new String[fieldNames.size()];
if (!fieldNames.isEmpty()) {
Iterator<String> iter = fieldNames.descendingIterator();
for (int i = 0; iter.hasNext(); i += 1) {
path[i] = iter.next();
}
}
return path;
}
protected String[] path(String name) {
String[] path = new String[fieldNames.size() + 1];
path[fieldNames.size()] = name;
if (!fieldNames.isEmpty()) {
Iterator<String> iter = fieldNames.descendingIterator();
for (int i = 0; iter.hasNext(); i += 1) {
path[i] = iter.next();
}
}
return path;
}
}
private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();
private static class DateReader extends PrimitiveReader<LocalDate> {
private DateReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalDate read(LocalDate reuse) {
return EPOCH_DAY.plusDays(column.nextInteger());
}
}
private static class TimestampReader extends PrimitiveReader<LocalDateTime> {
private TimestampReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalDateTime read(LocalDateTime reuse) {
return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS).toLocalDateTime();
}
}
private static class TimestampMillisReader extends PrimitiveReader<LocalDateTime> {
private TimestampMillisReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalDateTime read(LocalDateTime reuse) {
return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS).toLocalDateTime();
}
}
private static class TimestamptzReader extends PrimitiveReader<OffsetDateTime> {
private TimestamptzReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public OffsetDateTime read(OffsetDateTime reuse) {
return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS);
}
}
private static class TimestamptzMillisReader extends PrimitiveReader<OffsetDateTime> {
private TimestamptzMillisReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public OffsetDateTime read(OffsetDateTime reuse) {
return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS);
}
}
private static class FixedReader extends PrimitiveReader<byte[]> {
private FixedReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public byte[] read(byte[] reuse) {
if (reuse != null) {
column.nextBinary().toByteBuffer().duplicate().get(reuse);
return reuse;
} else {
return column.nextBinary().getBytes();
}
}
}
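// Builds Iceberg GenericRecord instances for the given struct type, reusing the previous record when one is supplied.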
static class RecordReader extends StructReader<Record, Record> {
private final StructType structType;
RecordReader(List<Type> types,
List<ParquetValueReader<?>> readers,
StructType struct) {
super(types, readers);
this.structType = struct;
}
@Override
protected Record newStructData(Record reuse) {
if (reuse != null) {
return reuse;
} else {
return GenericRecord.create(structType);
}
}
@Override
@SuppressWarnings("unchecked")
protected Object getField(Record intermediate, int pos) {
return intermediate.get(pos);
}
@Override
protected Record buildStruct(Record struct) {
return struct;
}
@Override
protected void set(Record struct, int pos, Object value) {
struct.set(pos, value);
}
}
}
| 1 | 15,607 | We avoid wildcard imports because it isn't clear where symbols are coming from and there is potential for collision. Could you roll back this change? (As well as the additional newline, we don't use breaks in imports either.) | apache-iceberg | java |
@@ -533,11 +533,11 @@ TEST_F(TCPv4Tests, send_and_receive_between_secure_ports_client_verifies)
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!sent)
{
- Locators input_begin(locator_list.begin());
- Locators input_end(locator_list.end());
+ Locators l_input_begin(locator_list.begin());
+ Locators l_input_end(locator_list.end());
sent =
- send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
+ send_resource_list.at(0)->send(message, 5, &l_input_begin, &l_input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
} | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include <thread>
#include <asio.hpp>
#include <gtest/gtest.h>
#include <MockReceiverResource.h>
#include "mock/MockTCPv4Transport.h"
#include <fastdds/dds/log/Log.hpp>
#include <fastrtps/transport/TCPv4TransportDescriptor.h>
#include <fastrtps/utils/Semaphore.h>
#include <fastrtps/utils/IPFinder.h>
#include <fastrtps/utils/IPLocator.h>
#include <rtps/transport/TCPv4Transport.h>
#include <rtps/transport/tcp/RTCPHeader.h>
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
using TCPv4Transport = eprosima::fastdds::rtps::TCPv4Transport;
using TCPHeader = eprosima::fastdds::rtps::TCPHeader;
#if defined(_WIN32)
#define GET_PID _getpid
#else
#define GET_PID getpid
#endif // if defined(_WIN32)
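// Ports shared by all tests in this file; the actual values are derived from the process id in main().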
static uint16_t g_default_port = 0;
static uint16_t g_output_port = 0;
static uint16_t g_input_port = 0;
static std::string g_test_wan_address = "88.88.88.88";
uint16_t get_port(
uint16_t offset)
{
uint16_t port = static_cast<uint16_t>(GET_PID());
if (offset > port)
{
port += offset;
}
return port;
}
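// Test fixture that enables Info logging and sets up the default transport descriptor via HELPER_SetDescriptorDefaults().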
class TCPv4Tests : public ::testing::Test
{
public:
TCPv4Tests()
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
HELPER_SetDescriptorDefaults();
}
~TCPv4Tests()
{
eprosima::fastdds::dds::Log::KillThread();
}
void HELPER_SetDescriptorDefaults();
TCPv4TransportDescriptor descriptor;
TCPv4TransportDescriptor descriptorOnlyOutput;
std::unique_ptr<std::thread> senderThread;
std::unique_ptr<std::thread> receiverThread;
};
TEST_F(TCPv4Tests, locators_with_kind_1_supported)
{
// Given
TCPv4Transport transportUnderTest(descriptor);
transportUnderTest.init();
Locator_t supportedLocator;
supportedLocator.kind = LOCATOR_KIND_TCPv4;
Locator_t unsupportedLocatorv4;
unsupportedLocatorv4.kind = LOCATOR_KIND_UDPv4;
Locator_t unsupportedLocatorv6;
unsupportedLocatorv6.kind = LOCATOR_KIND_UDPv6;
// Then
ASSERT_TRUE(transportUnderTest.IsLocatorSupported(supportedLocator));
ASSERT_FALSE(transportUnderTest.IsLocatorSupported(unsupportedLocatorv4));
ASSERT_FALSE(transportUnderTest.IsLocatorSupported(unsupportedLocatorv6));
}
TEST_F(TCPv4Tests, opening_and_closing_output_channel)
{
// Given
TCPv4Transport transportUnderTest(descriptorOnlyOutput);
transportUnderTest.init();
Locator_t genericOutputChannelLocator;
genericOutputChannelLocator.kind = LOCATOR_KIND_TCPv4;
genericOutputChannelLocator.port = g_output_port; // arbitrary
IPLocator::setLogicalPort(genericOutputChannelLocator, g_output_port);
SendResourceList send_resource_list;
// Then
ASSERT_FALSE(transportUnderTest.is_output_channel_open_for(genericOutputChannelLocator));
ASSERT_TRUE(transportUnderTest.OpenOutputChannel(send_resource_list, genericOutputChannelLocator));
ASSERT_FALSE(send_resource_list.empty());
ASSERT_TRUE(transportUnderTest.is_output_channel_open_for(genericOutputChannelLocator));
send_resource_list.clear();
//ASSERT_FALSE(transportUnderTest.is_output_channel_open_for(genericOutputChannelLocator));
}
// This test checks that opening a listening port, never bound by an input channel,
// is correctly closed without valgrind errors. It should show a warning message
// in the log about a call on a deleted object.
TEST_F(TCPv4Tests, opening_and_closing_output_channel_with_listener)
{
// Given
TCPv4Transport transportUnderTest(descriptor);
transportUnderTest.init();
Locator_t genericOutputChannelLocator;
genericOutputChannelLocator.kind = LOCATOR_KIND_TCPv4;
genericOutputChannelLocator.port = g_output_port; // arbitrary
IPLocator::setLogicalPort(genericOutputChannelLocator, g_output_port);
SendResourceList send_resource_list;
// Then
ASSERT_FALSE(transportUnderTest.is_output_channel_open_for(genericOutputChannelLocator));
ASSERT_TRUE(transportUnderTest.OpenOutputChannel(send_resource_list, genericOutputChannelLocator));
ASSERT_FALSE(send_resource_list.empty());
ASSERT_TRUE(transportUnderTest.is_output_channel_open_for(genericOutputChannelLocator));
send_resource_list.clear();
//ASSERT_FALSE(transportUnderTest.is_output_channel_open_for(genericOutputChannelLocator));
}
TEST_F(TCPv4Tests, opening_and_closing_input_channel)
{
// Given
TCPv4Transport transportUnderTest(descriptor);
transportUnderTest.init();
Locator_t genericInputChannelLocator;
genericInputChannelLocator.kind = LOCATOR_KIND_TCPv4;
genericInputChannelLocator.port = g_input_port; // listen port
IPLocator::setIPv4(genericInputChannelLocator, 127, 0, 0, 1);
// Then
ASSERT_FALSE (transportUnderTest.IsInputChannelOpen(genericInputChannelLocator));
ASSERT_TRUE (transportUnderTest.OpenInputChannel(genericInputChannelLocator, nullptr, 0x00FF));
ASSERT_TRUE (transportUnderTest.IsInputChannelOpen(genericInputChannelLocator));
ASSERT_TRUE (transportUnderTest.CloseInputChannel(genericInputChannelLocator));
ASSERT_FALSE (transportUnderTest.IsInputChannelOpen(genericInputChannelLocator));
ASSERT_FALSE (transportUnderTest.CloseInputChannel(genericInputChannelLocator));
}
#ifndef __APPLE__
TEST_F(TCPv4Tests, send_and_receive_between_ports)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
std::regex filter("RTCP(?!_SEQ)");
eprosima::fastdds::dds::Log::SetCategoryFilter(filter);
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
bool sent = false;
while (!sent)
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_TRUE(sent);
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
#endif // ifndef __APPLE__
TEST_F(TCPv4Tests, send_is_rejected_if_buffer_size_is_bigger_to_size_specified_in_descriptor)
{
// Given
TCPv4Transport transportUnderTest(descriptorOnlyOutput);
transportUnderTest.init();
Locator_t genericOutputChannelLocator;
genericOutputChannelLocator.kind = LOCATOR_KIND_TCPv4;
genericOutputChannelLocator.port = g_output_port;
IPLocator::setLogicalPort(genericOutputChannelLocator, 7400);
SendResourceList send_resource_list;
transportUnderTest.OpenOutputChannel(send_resource_list, genericOutputChannelLocator);
ASSERT_FALSE(send_resource_list.empty());
Locator_t destinationLocator;
destinationLocator.kind = LOCATOR_KIND_TCPv4;
destinationLocator.port = g_output_port + 1;
IPLocator::setLogicalPort(destinationLocator, 7400);
LocatorList_t locator_list;
locator_list.push_back(destinationLocator);
Locators destination_begin(locator_list.begin());
Locators destination_end(locator_list.end());
// Then
std::vector<octet> receiveBufferWrongSize(descriptor.sendBufferSize + 1);
ASSERT_FALSE(send_resource_list.at(0)->send(receiveBufferWrongSize.data(), (uint32_t)receiveBufferWrongSize.size(),
&destination_begin, &destination_end, (std::chrono::steady_clock::now() + std::chrono::microseconds(100))));
}
TEST_F(TCPv4Tests, RemoteToMainLocal_simply_strips_out_address_leaving_IP_ANY)
{
// Given
TCPv4Transport transportUnderTest(descriptor);
transportUnderTest.init();
Locator_t remote_locator;
remote_locator.kind = LOCATOR_KIND_TCPv4;
remote_locator.port = g_default_port;
IPLocator::setIPv4(remote_locator, 222, 222, 222, 222);
// When
Locator_t mainLocalLocator = transportUnderTest.RemoteToMainLocal(remote_locator);
ASSERT_EQ(mainLocalLocator.port, remote_locator.port);
ASSERT_EQ(mainLocalLocator.kind, remote_locator.kind);
ASSERT_EQ(IPLocator::toIPv4string(mainLocalLocator), s_IPv4AddressAny);
}
TEST_F(TCPv4Tests, match_if_port_AND_address_matches)
{
// Given
TCPv4Transport transportUnderTest(descriptor);
transportUnderTest.init();
Locator_t locatorAlpha;
locatorAlpha.port = g_default_port;
IPLocator::setIPv4(locatorAlpha, 239, 255, 0, 1);
Locator_t locatorBeta = locatorAlpha;
// Then
ASSERT_TRUE(transportUnderTest.DoInputLocatorsMatch(locatorAlpha, locatorBeta));
IPLocator::setIPv4(locatorBeta, 100, 100, 100, 100);
// Then
ASSERT_TRUE(transportUnderTest.DoInputLocatorsMatch(locatorAlpha, locatorBeta));
}
TEST_F(TCPv4Tests, send_to_wrong_interface)
{
TCPv4Transport transportUnderTest(descriptorOnlyOutput);
transportUnderTest.init();
Locator_t outputChannelLocator;
outputChannelLocator.port = g_output_port;
outputChannelLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setLogicalPort(outputChannelLocator, 7400);
IPLocator::setIPv4(outputChannelLocator, 127, 0, 0, 1); // Loopback
SendResourceList send_resource_list;
ASSERT_TRUE(transportUnderTest.OpenOutputChannel(send_resource_list, outputChannelLocator));
ASSERT_FALSE(send_resource_list.empty());
//Sending through a different IP will NOT work, except 0.0.0.0
Locator_t wrongLocator(outputChannelLocator);
IPLocator::setIPv4(wrongLocator, 111, 111, 111, 111);
LocatorList_t locator_list;
locator_list.push_back(wrongLocator);
Locators wrong_begin(locator_list.begin());
Locators wrong_end(locator_list.end());
std::vector<octet> message = { 'H', 'e', 'l', 'l', 'o' };
ASSERT_FALSE(send_resource_list.at(0)->send(message.data(), (uint32_t)message.size(), &wrong_begin, &wrong_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100))));
}
TEST_F(TCPv4Tests, send_to_blocked_interface)
{
descriptor.interfaceWhiteList.emplace_back("111.111.111.111");
TCPv4Transport transportUnderTest(descriptor);
transportUnderTest.init();
Locator_t outputChannelLocator;
outputChannelLocator.port = g_output_port;
outputChannelLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setLogicalPort(outputChannelLocator, 7400);
IPLocator::setIPv4(outputChannelLocator, 127, 0, 0, 1); // Loopback
SendResourceList send_resource_list;
ASSERT_TRUE(transportUnderTest.OpenOutputChannel(send_resource_list, outputChannelLocator));
ASSERT_FALSE(send_resource_list.empty());
//Sending through a different IP will NOT work, except 0.0.0.0
Locator_t wrongLocator(outputChannelLocator);
IPLocator::setIPv4(wrongLocator, 111, 111, 111, 111);
LocatorList_t locator_list;
locator_list.push_back(wrongLocator);
Locators wrong_begin(locator_list.begin());
Locators wrong_end(locator_list.end());
std::vector<octet> message = { 'H', 'e', 'l', 'l', 'o' };
ASSERT_FALSE(send_resource_list.at(0)->send(message.data(), (uint32_t)message.size(), &wrong_begin, &wrong_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100))));
}
#ifndef __APPLE__
TEST_F(TCPv4Tests, send_and_receive_between_allowed_interfaces_ports)
{
LocatorList_t interfaces;
if (IPFinder::getAllIPAddress(&interfaces))
{
Locator_t locator;
for (auto& tmpLocator : interfaces)
{
if (tmpLocator.kind == LOCATOR_KIND_UDPv4 && IPLocator::toIPv4string(tmpLocator) != "127.0.0.1")
{
locator = tmpLocator;
break;
}
}
if (IsAddressDefined(locator))
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
std::regex filter("RTCP(?!_SEQ)");
eprosima::fastdds::dds::Log::SetCategoryFilter(filter);
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.interfaceWhiteList.emplace_back(IPLocator::toIPv4string(locator));
recvDescriptor.add_listener_port(g_default_port);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.interfaceWhiteList.emplace_back(IPLocator::toIPv4string(locator));
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
inputLocator.set_address(locator);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
outputLocator.set_address(locator);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
bool bOk = false;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
bOk = true;
};
msg_recv->setCallback(recCallback);
bool bFinish(false);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!bFinish && !sent)
{
Locators input_begin2(locator_list.begin());
Locators input_end2(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin2, &input_end2,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_TRUE(sent);
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::seconds(10));
bFinish = true;
senderThread->join();
ASSERT_TRUE(bOk);
}
}
}
}
#if TLS_FOUND
TEST_F(TCPv4Tests, send_and_receive_between_secure_ports_client_verifies)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
using TLSOptions = TCPTransportDescriptor::TLSConfig::TLSOptions;
using TLSVerifyMode = TCPTransportDescriptor::TLSConfig::TLSVerifyMode;
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port);
recvDescriptor.apply_security = true;
recvDescriptor.tls_config.password = "test";
recvDescriptor.tls_config.cert_chain_file = "server.pem";
recvDescriptor.tls_config.private_key_file = "server.pem";
recvDescriptor.tls_config.tmp_dh_file = "dh2048.pem";
recvDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
recvDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
//recvDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
//recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV3);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.apply_security = true;
//sendDescriptor.tls_config.password = "test";
sendDescriptor.tls_config.verify_file = "ca.pem";
sendDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!sent)
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_TRUE(sent);
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
}
TEST_F(TCPv4Tests, send_and_receive_between_secure_ports_server_verifies)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
using TLSOptions = TCPTransportDescriptor::TLSConfig::TLSOptions;
using TLSVerifyMode = TCPTransportDescriptor::TLSConfig::TLSVerifyMode;
using TLSHSRole = TCPTransportDescriptor::TLSConfig::TLSHandShakeRole;
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port);
recvDescriptor.apply_security = true;
recvDescriptor.tls_config.handshake_role = TLSHSRole::CLIENT;
recvDescriptor.tls_config.password = "test";
recvDescriptor.tls_config.verify_file = "maincacert.pem";
recvDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
recvDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
recvDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
recvDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.apply_security = true;
sendDescriptor.tls_config.handshake_role = TLSHSRole::SERVER;
sendDescriptor.tls_config.password = "test";
sendDescriptor.tls_config.cert_chain_file = "server.pem";
sendDescriptor.tls_config.private_key_file = "server.pem";
sendDescriptor.tls_config.tmp_dh_file = "dh2048.pem";
sendDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER | TLSVerifyMode::VERIFY_FAIL_IF_NO_PEER_CERT;
sendDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
sendDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
sendDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
recvDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!sent)
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_TRUE(sent);
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
}
TEST_F(TCPv4Tests, send_and_receive_between_both_secure_ports)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
using TLSOptions = TCPTransportDescriptor::TLSConfig::TLSOptions;
using TLSVerifyMode = TCPTransportDescriptor::TLSConfig::TLSVerifyMode;
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port);
recvDescriptor.apply_security = true;
recvDescriptor.tls_config.password = "testkey";
recvDescriptor.tls_config.cert_chain_file = "mainpubcert.pem";
recvDescriptor.tls_config.private_key_file = "mainpubkey.pem";
recvDescriptor.tls_config.verify_file = "maincacert.pem";
// Server doesn't accept clients without certs
recvDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER | TLSVerifyMode::VERIFY_FAIL_IF_NO_PEER_CERT;
recvDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
recvDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
recvDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV3);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.apply_security = true;
sendDescriptor.tls_config.password = "testkey";
sendDescriptor.tls_config.cert_chain_file = "mainsubcert.pem";
sendDescriptor.tls_config.private_key_file = "mainsubkey.pem";
sendDescriptor.tls_config.verify_file = "maincacert.pem";
sendDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
sendDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
sendDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
sendDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
sendDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
sendDescriptor.tls_config.add_option(TLSOptions::NO_SSLV3);
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!sent)
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_TRUE(sent);
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
}
TEST_F(TCPv4Tests, send_and_receive_between_both_secure_ports_untrusted)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
using TLSOptions = TCPTransportDescriptor::TLSConfig::TLSOptions;
using TLSVerifyMode = TCPTransportDescriptor::TLSConfig::TLSVerifyMode;
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port);
recvDescriptor.apply_security = true;
recvDescriptor.tls_config.password = "testkey";
recvDescriptor.tls_config.cert_chain_file = "mainpubcert.pem";
recvDescriptor.tls_config.private_key_file = "mainpubkey.pem";
recvDescriptor.tls_config.verify_file = "ca.pem"; // This CA doesn't know about these certificates
// Server doesn't accept clients without certs
recvDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_FAIL_IF_NO_PEER_CERT;
recvDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
recvDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
recvDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV3);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.apply_security = true;
sendDescriptor.tls_config.password = "testkey";
sendDescriptor.tls_config.cert_chain_file = "mainsubcert.pem";
sendDescriptor.tls_config.private_key_file = "mainsubkey.pem";
sendDescriptor.tls_config.verify_file = "ca.pem"; // This CA doesn't know about these certificates
sendDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
sendDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
sendDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
sendDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
sendDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
sendDescriptor.tls_config.add_option(TLSOptions::NO_SSLV3);
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
ASSERT_TRUE(false);
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
int count = 0;
while (!sent && count < 30)
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
++count;
}
EXPECT_FALSE(sent);
sem.post();
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
}
TEST_F(TCPv4Tests, send_and_receive_between_secure_clients_1)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
using TLSVerifyMode = TCPTransportDescriptor::TLSConfig::TLSVerifyMode;
using TLSOptions = TCPTransportDescriptor::TLSConfig::TLSOptions;
using TLSHSRole = TCPTransportDescriptor::TLSConfig::TLSHandShakeRole;
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port);
recvDescriptor.apply_security = true;
recvDescriptor.tls_config.handshake_role = TLSHSRole::CLIENT;
//recvDescriptor.tls_config.password = "testkey";
//recvDescriptor.tls_config.password = "test";
//recvDescriptor.tls_config.cert_chain_file = "mainpubcert.pem";
//recvDescriptor.tls_config.private_key_file = "mainpubkey.pem";
recvDescriptor.tls_config.verify_file = "maincacert.pem"; // This CA only knows about mainsub certificates
//recvDescriptor.tls_config.verify_file = "ca.pem";
// Server doesn't accept clients without certs
recvDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_FAIL_IF_NO_PEER_CERT;
recvDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.apply_security = true;
sendDescriptor.tls_config.handshake_role = TLSHSRole::SERVER;
sendDescriptor.tls_config.password = "testkey";
sendDescriptor.tls_config.cert_chain_file = "mainsubcert.pem";
sendDescriptor.tls_config.private_key_file = "mainsubkey.pem";
//sendDescriptor.tls_config.password = "test";
//sendDescriptor.tls_config.cert_chain_file = "server.pem";
//sendDescriptor.tls_config.private_key_file = "server.pem";
//sendDescriptor.tls_config.verify_file = "maincacert.pem";
sendDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
sendDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!sent)
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_TRUE(sent);
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
}
/*
TEST_F(TCPv4Tests, send_and_receive_between_secure_clients_2)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
using TLSVerifyMode = TCPTransportDescriptor::TLSConfig::TLSVerifyMode;
using TLSOptions = TCPTransportDescriptor::TLSConfig::TLSOptions;
using TLSHSRole = TCPTransportDescriptor::TLSConfig::TLSHandShakeRole;
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port + 1);
recvDescriptor.apply_security = true;
recvDescriptor.tls_config.handshake_role = TLSHSRole::CLIENT;
//recvDescriptor.tls_config.password = "testkey";
//recvDescriptor.tls_config.password = "test";
//recvDescriptor.tls_config.cert_chain_file = "mainpubcert.pem";
//recvDescriptor.tls_config.private_key_file = "mainpubkey.pem";
recvDescriptor.tls_config.verify_file = "maincacert.pem"; // This CA only knows about mainsub certificates
//recvDescriptor.tls_config.verify_file = "ca.pem";
// Server doesn't accept clients without certs
recvDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_FAIL_IF_NO_PEER_CERT | TLSVerifyMode::VERIFY_PEER;
recvDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port + 1;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port + 1;
IPLocator::setLogicalPort(outputLocator, 7410);
TCPv4TransportDescriptor sendDescriptor2;
sendDescriptor2.apply_security = true;
sendDescriptor2.tls_config.handshake_role = TLSHSRole::SERVER;
sendDescriptor2.tls_config.password = "test";
sendDescriptor2.tls_config.cert_chain_file = "server.pem";
sendDescriptor2.tls_config.private_key_file = "server.pem";
//sendDescriptor2.tls_config.password = "testkey";
//sendDescriptor2.tls_config.cert_chain_file = "mainsubcert.pem";
//sendDescriptor2.tls_config.private_key_file = "mainsubkey.pem";
sendDescriptor2.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
sendDescriptor2.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
TCPv4Transport sendTransportUnderTest2(sendDescriptor2);
sendTransportUnderTest2.init();
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver *msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
ASSERT_TRUE(sendTransportUnderTest2.OpenOutputChannel(outputLocator));
octet message[5] = { 'H','e','l','l','o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
EXPECT_FALSE(true); // Should not receive
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
bool sent = sendTransportUnderTest2.send(message, 5, outputLocator, inputLocator);
int count = 0;
while (!sent && count < 30)
{
sent = sendTransportUnderTest2.send(message, 5, outputLocator, inputLocator);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
++count;
}
EXPECT_FALSE(sent);
sem.post();
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
ASSERT_TRUE(sendTransportUnderTest2.CloseOutputChannel(outputLocator));
}
*/
TEST_F(TCPv4Tests, send_and_receive_between_secure_ports_untrusted_server)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
using TLSOptions = TCPTransportDescriptor::TLSConfig::TLSOptions;
using TLSVerifyMode = TCPTransportDescriptor::TLSConfig::TLSVerifyMode;
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.add_listener_port(g_default_port);
recvDescriptor.apply_security = true;
recvDescriptor.tls_config.password = "testkey";
recvDescriptor.tls_config.cert_chain_file = "mainpubcert.pem";
recvDescriptor.tls_config.private_key_file = "mainpubkey.pem";
// Server doesn't accept clients without certs
recvDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
recvDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
recvDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
recvDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
recvDescriptor.tls_config.add_option(TLSOptions::NO_SSLV3);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.apply_security = true;
sendDescriptor.tls_config.verify_file = "ca.pem"; // This CA doesn't know about these certificates
sendDescriptor.tls_config.verify_mode = TLSVerifyMode::VERIFY_PEER;
sendDescriptor.tls_config.add_option(TLSOptions::DEFAULT_WORKAROUNDS);
sendDescriptor.tls_config.add_option(TLSOptions::SINGLE_DH_USE);
sendDescriptor.tls_config.add_option(TLSOptions::NO_COMPRESSION);
sendDescriptor.tls_config.add_option(TLSOptions::NO_SSLV2);
sendDescriptor.tls_config.add_option(TLSOptions::NO_SSLV3);
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
Semaphore sem;
std::function<void()> recCallback = [&]()
{
ASSERT_TRUE(false);
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
sem.post();
};
msg_recv->setCallback(recCallback);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
int count = 0;
while (!sent && count < 30)
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
++count;
}
EXPECT_FALSE(sent);
sem.post();
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
senderThread->join();
sem.wait();
}
}
#endif //TLS_FOUND
TEST_F(TCPv4Tests, send_and_receive_between_allowed_localhost_interfaces_ports)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
std::regex filter("RTCP(?!_SEQ)");
eprosima::fastdds::dds::Log::SetCategoryFilter(filter);
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.interfaceWhiteList.emplace_back("127.0.0.1");
recvDescriptor.add_listener_port(g_default_port);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.interfaceWhiteList.emplace_back("127.0.0.1");
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
bool bOk = false;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
bOk = true;
};
msg_recv->setCallback(recCallback);
bool bFinish(false);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!bFinish && !sent)
{
Locators input_begin2(locator_list.begin());
Locators input_end2(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin2, &input_end2,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_TRUE(sent);
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::seconds(10));
bFinish = true;
senderThread->join();
ASSERT_TRUE(bOk);
}
}
TEST_F(TCPv4Tests, send_and_receive_between_blocked_interfaces_ports)
{
LocatorList_t interfaces;
if (IPFinder::getAllIPAddress(&interfaces))
{
Locator_t locator;
for (auto& tmpLocator : interfaces)
{
if (tmpLocator.kind == LOCATOR_KIND_UDPv4 && IPLocator::toIPv4string(tmpLocator) != "127.0.0.1")
{
locator = tmpLocator;
break;
}
}
if (IsAddressDefined(locator))
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Kind::Info);
std::regex filter("RTCP(?!_SEQ)");
eprosima::fastdds::dds::Log::SetCategoryFilter(filter);
TCPv4TransportDescriptor recvDescriptor;
recvDescriptor.interfaceWhiteList.emplace_back(IPLocator::toIPv4string(locator));
recvDescriptor.add_listener_port(g_default_port);
TCPv4Transport receiveTransportUnderTest(recvDescriptor);
receiveTransportUnderTest.init();
TCPv4TransportDescriptor sendDescriptor;
sendDescriptor.interfaceWhiteList.emplace_back(IPLocator::toIPv4string(locator));
TCPv4Transport sendTransportUnderTest(sendDescriptor);
sendTransportUnderTest.init();
Locator_t inputLocator;
inputLocator.kind = LOCATOR_KIND_TCPv4;
inputLocator.port = g_default_port;
IPLocator::setIPv4(inputLocator, 127, 0, 0, 1);
IPLocator::setLogicalPort(inputLocator, 7410);
LocatorList_t locator_list;
locator_list.push_back(inputLocator);
Locator_t outputLocator;
outputLocator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setIPv4(outputLocator, 127, 0, 0, 1);
outputLocator.port = g_default_port;
IPLocator::setLogicalPort(outputLocator, 7410);
{
MockReceiverResource receiver(receiveTransportUnderTest, inputLocator);
MockMessageReceiver* msg_recv = dynamic_cast<MockMessageReceiver*>(receiver.CreateMessageReceiver());
ASSERT_TRUE(receiveTransportUnderTest.IsInputChannelOpen(inputLocator));
SendResourceList send_resource_list;
ASSERT_TRUE(sendTransportUnderTest.OpenOutputChannel(send_resource_list, outputLocator));
ASSERT_FALSE(send_resource_list.empty());
octet message[5] = { 'H', 'e', 'l', 'l', 'o' };
bool bOk = false;
std::function<void()> recCallback = [&]()
{
EXPECT_EQ(memcmp(message, msg_recv->data, 5), 0);
bOk = true;
};
msg_recv->setCallback(recCallback);
bool bFinished(false);
auto sendThreadFunction = [&]()
{
Locators input_begin(locator_list.begin());
Locators input_end(locator_list.end());
bool sent =
send_resource_list.at(0)->send(message, 5, &input_begin, &input_end,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
while (!bFinished && !sent)
{
Locators input_begin2(locator_list.begin());
Locators input_end2(locator_list.end());
sent =
send_resource_list.at(0)->send(message, 5, &input_begin2, &input_end2,
(std::chrono::steady_clock::now() + std::chrono::microseconds(100)));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
EXPECT_FALSE(sent);
//EXPECT_TRUE(transportUnderTest.send(message, 5, outputLocator, inputLocator));
};
senderThread.reset(new std::thread(sendThreadFunction));
std::this_thread::sleep_for(std::chrono::seconds(10));
bFinished = true;
senderThread->join();
ASSERT_FALSE(bOk);
}
}
}
}
#endif // ifndef __APPLE__
TEST_F(TCPv4Tests, receive_unordered_data)
{
constexpr uint16_t logical_port = 7410;
constexpr uint32_t num_bytes_1 = 3;
constexpr uint32_t num_bytes_2 = 13;
const char* bad_headers[] =
{
"-RTC", "-RT", "-R",
"-RRTC", "-RRT", "-RR",
"-RTRTC", "-RTRT", "-RTR",
"-RTCRTC", "-RTCRT", "-RTCR"
};
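// Test receiver that tallies deliveries by payload size so the expected counts can be checked at the end.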
struct Receiver : public TransportReceiverInterface
{
std::array<std::size_t, 3> num_received{ 0, 0, 0 };
void OnDataReceived(
const octet* data,
const uint32_t size,
const Locator_t& local_locator,
const Locator_t& remote_locator) override
{
static_cast<void>(data);
static_cast<void>(local_locator);
static_cast<void>(remote_locator);
std::cout << "Received " << size << " bytes: " << std::hex << uint32_t(data[0]) << std::dec << std::endl;
switch (size)
{
case num_bytes_1:
num_received[0]++;
break;
case num_bytes_2:
num_received[1]++;
break;
default:
num_received[2]++;
break;
}
}
};
Receiver receiver;
TCPv4TransportDescriptor test_descriptor = descriptor;
test_descriptor.check_crc = false;
TCPv4Transport uut(test_descriptor);
ASSERT_TRUE(uut.init()) << "Failed to initialize transport. Port " << g_default_port << " may be in use";
Locator_t input_locator;
input_locator.kind = LOCATOR_KIND_TCPv4;
input_locator.port = g_default_port;
IPLocator::setIPv4(input_locator, 127, 0, 0, 1);
IPLocator::setLogicalPort(input_locator, logical_port);
EXPECT_TRUE(uut.OpenInputChannel(input_locator, &receiver, 0xFFFF));
// Give the acceptor time to open
std::this_thread::sleep_for(std::chrono::seconds(1));
asio::error_code ec;
asio::io_context ctx;
asio::ip::tcp::socket sender(ctx);
asio::ip::tcp::endpoint destination;
destination.port(g_default_port);
destination.address(asio::ip::address::from_string("127.0.0.1"));
sender.connect(destination, ec);
ASSERT_TRUE(!ec) << ec;
std::array<octet, num_bytes_1> bytes_1{ 0 };
std::array<octet, num_bytes_2> bytes_2{ 0 };
TCPHeader h1;
h1.logical_port = logical_port;
h1.length += num_bytes_1;
TCPHeader h2;
h2.logical_port = logical_port;
h2.length += num_bytes_2;
std::array<std::size_t, 3> expected_number{ 0, 0, 0 };
auto send_first = [&]()
{
expected_number[0]++;
bytes_1[0]++;
EXPECT_EQ(TCPHeader::size(), asio::write(sender, asio::buffer(&h1, TCPHeader::size()), ec));
EXPECT_EQ(num_bytes_1, asio::write(sender, asio::buffer(bytes_1.data(), bytes_1.size()), ec));
};
// Send first synchronized
send_first();
// Send non-matching RTCP headers
for (const char* header : bad_headers)
{
asio::write(sender, asio::buffer(header, strlen(header) - 1), ec);
}
// Send first prepended with bad headers
for (const char* header : bad_headers)
{
asio::write(sender, asio::buffer(header, strlen(header) - 1), ec);
send_first();
}
// Interleave headers and data (only first will arrive)
expected_number[0]++;
EXPECT_EQ(TCPHeader::size(), asio::write(sender, asio::buffer(&h1, TCPHeader::size()), ec));
EXPECT_EQ(TCPHeader::size(), asio::write(sender, asio::buffer(&h2, TCPHeader::size()), ec));
EXPECT_EQ(num_bytes_1, asio::write(sender, asio::buffer(bytes_1.data(), bytes_1.size()), ec));
EXPECT_EQ(num_bytes_2, asio::write(sender, asio::buffer(bytes_2.data(), bytes_2.size()), ec));
// Send second without interleaving
expected_number[1]++;
EXPECT_EQ(TCPHeader::size(), asio::write(sender, asio::buffer(&h2, TCPHeader::size()), ec));
EXPECT_EQ(num_bytes_2, asio::write(sender, asio::buffer(bytes_2.data(), bytes_2.size()), ec));
// Wait for data to be received
std::this_thread::sleep_for(std::chrono::seconds(1));
EXPECT_TRUE(!sender.close(ec));
EXPECT_EQ(expected_number, receiver.num_received);
EXPECT_TRUE(uut.CloseInputChannel(input_locator));
}
void TCPv4Tests::HELPER_SetDescriptorDefaults()
{
descriptor.add_listener_port(g_default_port);
descriptor.set_WAN_address(g_test_wan_address);
}
int main(
int argc,
char** argv)
{
eprosima::fastdds::dds::Log::SetVerbosity(eprosima::fastdds::dds::Log::Info);
g_default_port = get_port(4000);
g_output_port = get_port(5000);
g_input_port = get_port(5010);
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 1 | 23,092 | Why are these variables renamed? Is there a conflict with some Windows specific variable? | eProsima-Fast-DDS | cpp |
@@ -18,6 +18,8 @@
require_once("../inc/util.inc");
require_once("../inc/xml.inc");
+if(file_exists('../../release.inc'))
+ include '../../release.inc';
BoincDb::get(true);
xml_header(); | 1 | <?php
// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
require_once("../inc/util.inc");
require_once("../inc/xml.inc");
BoincDb::get(true);
xml_header();
// This all needs to work even when DB is down.
// So cache list of platforms in a file,
// and update it every hour if possible.
//
function show_platforms() {
$xmlFragment = unserialize(get_cached_data(3600, "project_config_platform_xml"));
if ($xmlFragment==false){
$platforms = BoincDB::get()->enum_fields("platform, DBNAME.app_version, DBNAME.app", "BoincPlatform", "platform.name, platform.user_friendly_name, plan_class", "app_version.platformid = platform.id and app_version.appid = app.id and app_version.deprecated=0 and app.deprecated=0 group by platform.name, plan_class", "");
$xmlFragment = " <platforms>";
foreach ($platforms as $platform){
$xmlFragment .= "
<platform>
<platform_name>$platform->name</platform_name>
<user_friendly_name>$platform->user_friendly_name</user_friendly_name>";
if ($platform->plan_class) $xmlFragment .= "
<plan_class>$platform->plan_class</plan_class>\n";
$xmlFragment .= "
</platform>";
}
$xmlFragment .= "\n </platforms>\n";
set_cached_data(3600, serialize($xmlFragment), "project_config_platform_xml");
}
echo $xmlFragment;
}
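// Build the <project_config> reply from config.xml settings and optional files in the project root.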
$config = get_config();
global $master_url;
$long_name = parse_config($config, "<long_name>");
$min_passwd_length = parse_config($config, "<min_passwd_length>");
if (!$min_passwd_length) {
$min_passwd_length = 6;
}
$disable_account_creation = parse_bool($config, "disable_account_creation");
echo "<project_config>
<name>$long_name</name>
<master_url>$master_url</master_url>
<web_rpc_url_base>".secure_url_base()."</web_rpc_url_base>
";
if (parse_config($config, "<account_manager>")) {
echo " <account_manager/>\n";
}
$local_revision = @trim(file_get_contents("../../local.revision"));
if ($local_revision) {
echo "<local_revision>$local_revision</local_revision>\n";
}
if (web_stopped()) {
echo "
<error_num>".ERR_PROJECT_DOWN."</error_num>
<web_stopped>1</web_stopped>
";
} else {
echo " <web_stopped>0</web_stopped>\n";
}
if ($disable_account_creation || defined('INVITE_CODES')) {
echo " <account_creation_disabled/>\n";
}
if (defined('INVITE_CODES')) {
echo " <invite_code_required/>\n";
}
echo " <min_passwd_length>$min_passwd_length</min_passwd_length>\n";
if (sched_stopped()) {
echo " <sched_stopped>1</sched_stopped>\n";
} else {
echo " <sched_stopped>0</sched_stopped>\n";
}
$min_core_client_version = parse_config($config, "<min_core_client_version>");
if ($min_core_client_version) {
echo "<min_client_version>$min_core_client_version</min_client_version>\n";
}
show_platforms();
$tou_file = "../../terms_of_use.txt";
if (file_exists($tou_file)) {
$terms_of_use = trim(file_get_contents($tou_file));
if ($terms_of_use) {
echo " <terms_of_use>\n$terms_of_use\n</terms_of_use>\n";
}
}
if (LDAP_HOST) {
echo "<ldap_auth/>\n";
}
if (file_exists("../../project_keywords.xml")) {
readfile("../../project_keywords.xml");
}
echo "</project_config>";
?>
| 1 | 11,724 | Do you test for file existence simply to reduce the warning message that may occur when using the include? Also, I think the practice used in BOINC is to go ahead and use include_once even though it isn't strictly necessary in this case. | BOINC-boinc | php |
@@ -20,6 +20,8 @@ import (
)
// +kubebuilder:object:root=true
+// +kubebuilder:printcolumn:name="action",type=string,JSONPath=`.spec.action`
+// +kubebuilder:printcolumn:name="duration",type=string,JSONPath=`.spec.duration`
// +chaos-mesh:experiment
// TimeChaos is the Schema for the timechaos API | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// +chaos-mesh:experiment
// TimeChaos is the Schema for the timechaos API
type TimeChaos struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec defines the behavior of a time chaos experiment
Spec TimeChaosSpec `json:"spec"`
// +optional
// Most recently observed status of the time chaos experiment
Status TimeChaosStatus `json:"status"`
}
var _ InnerObjectWithSelector = (*TimeChaos)(nil)
var _ InnerObject = (*TimeChaos)(nil)
// TimeChaosSpec defines the desired state of TimeChaos
type TimeChaosSpec struct {
ContainerSelector `json:",inline"`
// TimeOffset defines the delta time of injected program. It's a possibly signed sequence of decimal numbers, such as
// "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
TimeOffset string `json:"timeOffset" webhook:"TimeOffset"`
// ClockIds defines all affected clock id
// All available options are ["CLOCK_REALTIME","CLOCK_MONOTONIC","CLOCK_PROCESS_CPUTIME_ID","CLOCK_THREAD_CPUTIME_ID",
// "CLOCK_MONOTONIC_RAW","CLOCK_REALTIME_COARSE","CLOCK_MONOTONIC_COARSE","CLOCK_BOOTTIME","CLOCK_REALTIME_ALARM",
// "CLOCK_BOOTTIME_ALARM"]
// Default value is ["CLOCK_REALTIME"]
ClockIds []string `json:"clockIds,omitempty" webhook:"ClockIds,nilable"`
// Duration represents the duration of the chaos action
Duration *string `json:"duration,omitempty"`
}
// TimeChaosStatus defines the observed state of TimeChaos
type TimeChaosStatus struct {
ChaosStatus `json:",inline"`
}
func (in *TimeChaos) GetSelectorSpecs() map[string]interface{} {
return map[string]interface{}{
".": &in.Spec.ContainerSelector,
}
}
| 1 | 25,452 | `TimeChaos` does not contain a field `.spec.action` | chaos-mesh-chaos-mesh | go |
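A minimal sketch of print columns that only reference fields which actually exist on TimeChaosSpec (choosing timeOffset and duration here is an assumption for illustration, not the project's decision); markers like these would sit above the TimeChaos type in place of the ones in the patch:

// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:name="timeOffset",type=string,JSONPath=`.spec.timeOffset`
// +kubebuilder:printcolumn:name="duration",type=string,JSONPath=`.spec.duration`
// +chaos-mesh:experiment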
@@ -1,9 +1,10 @@
package net.runelite.rs.api;
+import net.runelite.api.AbstractArchive;
import net.runelite.api.IndexDataBase;
import net.runelite.mapping.Import;
-public interface RSAbstractArchive extends IndexDataBase
+public interface RSAbstractArchive extends IndexDataBase, AbstractArchive
{
@Import("takeFile")
byte[] getConfigData(int archiveId, int fileId); | 1 | package net.runelite.rs.api;
import net.runelite.api.IndexDataBase;
import net.runelite.mapping.Import;
public interface RSAbstractArchive extends IndexDataBase
{
@Import("takeFile")
byte[] getConfigData(int archiveId, int fileId);
@Import("getGroupFileIds")
@Override
int[] getFileIds(int group);
}
| 1 | 16,544 | You need to override where possible in rs-api | open-osrs-runelite | java |
@@ -25,5 +25,12 @@ namespace Reporting
public string BuildName { get; set; }
public DateTime TimeStamp { get; set; }
+
+ public Dictionary<string, string> AdditionalData { get; set; } = new Dictionary<string, string>();
+
+ public void AddData(string key, string payload)
+ {
+ AdditionalData.Add(key, payload);
+ }
}
} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Runtime.Serialization;
using System.Text;
namespace Reporting
{
public sealed class Build
{
public string Repo { get; set; }
public string Branch { get; set; }
public string Architecture { get; set; }
public string Locale { get; set; }
public string GitHash { get; set; }
public string BuildName { get; set; }
public DateTime TimeStamp { get; set; }
}
}
| 1 | 11,737 | Should this have an add/update/etc pattern? What happens if I need to change some set of data? May be better to just let the callsite manipulate the dictionary. | dotnet-performance | .cs |
@@ -177,6 +177,18 @@ class PyRegion(object):
name: the name of the output
"""
+ @not_implemented
+ def getAlgorithmInstance(self):
+ """
+ Returns the instance of the underlying algorithm that is performing
+ the computation.
+
+ This method should be overridden by the region subclass.
+
+ Note that the return type depends on the Region and therefore
+ cannot be determined in advance.
+ """
+
def getParameter(self, name, index):
"""Default implementation that return an attribute with the requested name
| 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the base class for NuPIC 2 Python regions.
"""
import numpy
RealNumpyDType = numpy.float32
from abc import ABCMeta, abstractmethod
from nupic.support import getCallerInfo
def not_implemented(f):
"""A decorator that raises NotImplementedError exception when called
Keeps the docstring of the original function.
"""
def decorated(*args, **kw):
gci = getCallerInfo()
caller = gci[0] + '()'
if gci[2]:
caller = gci[2] + '.' + caller
message = 'The unimplemented method '
message += '%s() was called by %s' % (f.func_name, caller)
raise NotImplementedError(message)
  decorated.__doc__ = f.__doc__
return decorated
class PyRegion(object):
"""
PyRegion provides services to its sub-classes (the actual regions):
- Define and document the interface of a Python region
- Enforce implementation of required methods
- Default implementation for some methods
PyRegion is an abstract base class (http://docs.python.org/library/abc.html).
If a subclass doesn't implement all its abstract methods it can't be
instantiated. Note, that the signature of implemented abstract method in the
subclass doesn't need to match the signature of the abstract method in the
base class. This is very important for __init__() in this case.
The abstract methods (decorated with @abstract method) are:
* __init__
* initialize
* compute
In addition, PyRegion decorates some other methods with the
@not_implemented decorator. A sub-class may opt not to implement these
  methods, but if such a method is called then a NotImplementedError will be
raised. This is useful for methods like setParameterArray if a particular
subclass has no array parameters.
The not implemented methods (decorated with @not_implemented) are:
* getSpec (class method)
* setParameter
* setParameterArray
* getOutputElementCount
The getSpec is a class method, which is actually required but since it's
not an instance method the @abstractmethod decorator doesn't apply.
Finally, PyRegion provides reasonable default implementation to some methods.
Sub-classes may opt to override these methods or use the default
implementation (often recommended).
The implemented methods are:
* getParameter
* getParameterArray
* getParameterArrayCount
* executeMethod
"""
__metaclass__ = ABCMeta
@classmethod
@not_implemented
def getSpec(cls):
"""Returns the region spec for this region. The Region Spec is a dictionary
with the following keys:
description -- a string
singleNodeOnly -- a boolean (True if this Region supports only a single node)
inputs -- a dictionary in which the keys are the names of the inputs and
the values are dictionaries with these keys:
description - string
regionLevel -- True if this is a "region-level" input.
dataType - a string describing the data type, usually 'Real32'
count - the number of items in the input. 0 means unspecified.
required -- boolean - whether the input is must be connected
isDefaultInput -- must be True for exactly one input
requireSplitterMap -- [just set this to False.]
outputs -- a dictionary with similar structure to inputs. The keys
are:
description
dataType
count
regionLevel
isDefaultOutput
parameters -- a dictionary of dictionaries with the following keys:
description
dataType
count
constraints (optional)
accessMode (one of "ReadWrite", "Read", "Create")
This class method is called by NuPIC before creating a Region.
"""
@abstractmethod
def __init__(self, *args, **kwars):
"""Initialize the node with creation parameters from the node spec
Should be implemented by subclasses (unless there are no creation params)
"""
@abstractmethod
def initialize(self, inputs, outputs):
"""Initialize the node after the network is fully linked
It is called once by NuPIC before the first call to compute(). It is
a good place to perform one time initialization that depend on the inputs
and/or outputs. The region may also remember its inputs and outputs here
because they will not change.
inputs: dict of numpy arrays (one per input)
outputs: dict of numpy arrays (one per output)
"""
@abstractmethod
def compute(self, inputs, outputs):
"""Perform the main computation
This method is called in each iteration for each phase the node supports.
inputs: dict of numpy arrays (one per input)
outputs: dict of numpy arrays (one per output)
"""
@not_implemented
def getOutputElementCount(self, name):
"""Return the number of elements in the output of a single node
If the region has multiple nodes (all must have the same output
size) then just the number of output elements of a single node
should be returned.
name: the name of the output
"""
def getParameter(self, name, index):
"""Default implementation that return an attribute with the requested name
This method provides a default implementation of getParameter() that simply
returns an attribute with the parameter name. If the Region conceptually
contains multiple nodes with separate state the 'index' argument is used
to request a parameter of a specific node inside the region. In case of
a region-level parameter the index should be -1
The implementation prevents accessing parameters names that start with '_'.
It may be better to enforce this convention at the node spec level.
name: name of requested parameter
index: index of node inside the region (if relevant)
"""
if name.startswith('_'):
raise Exception('Parameter name must not start with an underscore')
value = getattr(self, name)
return value
def getParameterArrayCount(self, name, index):
"""Default implementation that return the length of the attribute
This default implementation goes hand in hand with getParameterArray().
If you override one of them in your subclass, you should probably override
both of them.
The implementation prevents accessing parameters names that start with '_'.
It may be better to enforce this convention at the node spec level.
name: name of requested parameter
index: index of node inside the region (if relevant)
"""
if name.startswith('_'):
raise Exception('Parameter name must not start with an underscore')
v = getattr(self, name)
return len(self.parameters[name])
def getParameterArray(self, name, index, array):
"""Default implementation that return an attribute with the requested name
This method provides a default implementation of getParameterArray() that
returns an attribute with the parameter name. If the Region conceptually
contains multiple nodes with separate state the 'index' argument is used
to request a parameter of a specific node inside the region. The attribute
value is written into the output array. No type or sanity checks are
performed for performance reasons. If something goes awry it will result
in a low-level exception. If you are unhappy about it you can implement
your own getParameterArray() method in the subclass.
The implementation prevents accessing parameters names that start with '_'.
It may be better to enforce this convention at the node spec level.
name: name of requested parameter
index: index of node inside the region (if relevant)
array: output numpy array that the value is written to
"""
if name.startswith('_'):
raise Exception('Parameter name must not start with an underscore')
v = getattr(self, name)
# Not performing sanity checks for performance reasons.
#assert array.dtype == v.dtype
#assert len(array) == len(v)
array[:] = v
@not_implemented
def setParameter(self, name, index, value):
"""Set the value of a parameter
If the Region conceptually contains multiple nodes with separate state
the 'index' argument is used set a parameter of a specific node inside
the region.
name: name of requested parameter
index: index of node inside the region (if relevant)
value: the value to assign to the requested parameter
"""
@not_implemented
def setParameterArray(self, name, index, array):
"""Set the value of an array parameter
If the Region conceptually contains multiple nodes with separate state
the 'index' argument is used set a parameter of a specific node inside
the region.
name: name of requested parameter
index: index of node inside the region (if relevant)
array: the value to assign to the requested parameter (a numpy array)
"""
def write(self, proto):
"""This method is called during network serialization with a proto object
so that this region can write its data to it.
proto: PyRegionProto capnproto object
"""
pass
def read(self, proto):
"""This method is called during network serialization with a proto object
so that this region can read its data from it.
proto: PyRegionProto capnproto object
"""
pass
def serializeExtraData(self, filePath):
"""This method is called during network serialization with an external
filename that can be used to bypass pickle for saving large binary states.
filePath: full filepath and name
"""
pass
def deSerializeExtraData(self, filePath):
"""This method is called during network deserialization with an external
filename that can be used to bypass pickle for loading large binary states.
filePath: full filepath and name
"""
pass
def executeMethod(self, methodName, args):
"""Executes a method named 'methodName' with the specified arguments.
This method is called when the user executes a command as defined in
the node spec. It provides a perfectly reasonble implementation
of the command mechanism. As a sub-class developer you just need to
implement a method for each command in the node spec. Note that due to
the command mechanism only unnamed argument are supported.
methodName: the name of the method that correspond to a command in the spec
args: list of arguments that will be passed to the method
"""
if not hasattr(self, methodName):
raise Exception('Missing command method: ' + methodName)
m = getattr(self, methodName)
if not hasattr(m, '__call__'):
raise Exception('Command: ' + methodName + ' must be callable')
return m(*args)
| 1 | 20,132 | I don't think this should be added to PyRegion. Adding it here requires that every region have an "algorithm", which may not always make sense, and the return value type will be different in every case. But it's fine to use the same name, as a convention, in the regions where we choose to implement it, if that makes the API more discoverable. | numenta-nupic | py |
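A loose Go sketch of the convention being suggested (hypothetical names, not NuPIC code): the shared contract stays algorithm-agnostic, and only concrete regions that really wrap an algorithm expose an accessor for it under an agreed-upon name.

package main

import "fmt"

// Region is the shared contract; it says nothing about underlying algorithms.
type Region interface {
	Compute(inputs, outputs map[string][]float64)
}

// spatialPooler stands in for whatever algorithm object a region wraps.
type spatialPooler struct{ columns int }

// SPRegion is a concrete region that happens to have an underlying algorithm.
type SPRegion struct{ sp *spatialPooler }

var _ Region = (*SPRegion)(nil)

func (r *SPRegion) Compute(inputs, outputs map[string][]float64) {}

// AlgorithmInstance is exposed by convention on this concrete type only,
// so regions without an algorithm are not forced to implement it.
func (r *SPRegion) AlgorithmInstance() *spatialPooler { return r.sp }

func main() {
	r := &SPRegion{sp: &spatialPooler{columns: 2048}}
	fmt.Println(r.AlgorithmInstance().columns) // prints: 2048
}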
@@ -421,6 +421,12 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyht
return nil
}
+ // detect connection timeout to the upstream and respond with 504
+ switch e, ok := proxyErr.(net.Error); ok {
+ case e.Timeout():
+ return caddyhttp.Error(http.StatusGatewayTimeout, proxyErr)
+ }
+
// if the roundtrip was successful, don't retry the request or
// ding the health status of the upstream (an error can still
// occur after the roundtrip if, for example, a response handler | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reverseproxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
"go.uber.org/zap"
"golang.org/x/net/http/httpguts"
)
func init() {
caddy.RegisterModule(Handler{})
}
// Handler implements a highly configurable and production-ready reverse proxy.
//
// Upon proxying, this module sets the following placeholders (which can be used
// both within and after this handler):
//
// Placeholder | Description
// ------------|-------------
// `{http.reverse_proxy.upstream.address}` | The full address to the upstream as given in the config
// `{http.reverse_proxy.upstream.hostport}` | The host:port of the upstream
// `{http.reverse_proxy.upstream.host}` | The host of the upstream
// `{http.reverse_proxy.upstream.port}` | The port of the upstream
// `{http.reverse_proxy.upstream.requests}` | The approximate current number of requests to the upstream
// `{http.reverse_proxy.upstream.max_requests}` | The maximum approximate number of requests allowed to the upstream
// `{http.reverse_proxy.upstream.fails}` | The number of recent failed requests to the upstream
type Handler struct {
// Configures the method of transport for the proxy. A transport
// is what performs the actual "round trip" to the backend.
// The default transport is plaintext HTTP.
TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`
// A circuit breaker may be used to relieve pressure on a backend
// that is beginning to exhibit symptoms of stress or latency.
// By default, there is no circuit breaker.
CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`
// Load balancing distributes load/requests between backends.
LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
// Health checks update the status of backends, whether they are
// up or down. Down backends will not be proxied to.
HealthChecks *HealthChecks `json:"health_checks,omitempty"`
// Upstreams is the list of backends to proxy to.
Upstreams UpstreamPool `json:"upstreams,omitempty"`
// Adjusts how often to flush the response buffer. A
// negative value disables response buffering.
// TODO: figure out good defaults and write docs for this
// (see https://github.com/caddyserver/caddy/issues/1460)
FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
// Headers manipulates headers between Caddy and the backend.
// By default, all headers are passed-thru without changes,
// with the exceptions of special hop-by-hop headers.
//
// X-Forwarded-For and X-Forwarded-Proto are also set
// implicitly, but this may change in the future if the official
// standardized Forwarded header field gains more adoption.
Headers *headers.Handler `json:"headers,omitempty"`
// If true, the entire request body will be read and buffered
// in memory before being proxied to the backend. This should
// be avoided if at all possible for performance reasons.
BufferRequests bool `json:"buffer_requests,omitempty"`
// List of handlers and their associated matchers to evaluate
// after successful roundtrips. The first handler that matches
// the response from a backend will be invoked. The response
// body from the backend will not be written to the client;
// it is up to the handler to finish handling the response.
// If passive health checks are enabled, any errors from the
// handler chain will not affect the health status of the
// backend.
//
// Two new placeholders are available in this handler chain:
// - `{http.reverse_proxy.status_code}` The status code
// - `{http.reverse_proxy.status_text}` The status text
HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`
Transport http.RoundTripper `json:"-"`
CB CircuitBreaker `json:"-"`
ctx caddy.Context
logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.reverse_proxy",
New: func() caddy.Module { return new(Handler) },
}
}
// Provision ensures that h is set up properly before use.
func (h *Handler) Provision(ctx caddy.Context) error {
h.ctx = ctx
h.logger = ctx.Logger(h)
// verify SRV compatibility
for i, v := range h.Upstreams {
if v.LookupSRV == "" {
continue
}
if h.HealthChecks != nil && h.HealthChecks.Active != nil {
return fmt.Errorf(`upstream: lookup_srv is incompatible with active health checks: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV)
}
if v.Dial != "" {
return fmt.Errorf(`upstream: specifying dial address is incompatible with lookup_srv: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV)
}
}
// start by loading modules
if h.TransportRaw != nil {
mod, err := ctx.LoadModule(h, "TransportRaw")
if err != nil {
return fmt.Errorf("loading transport: %v", err)
}
h.Transport = mod.(http.RoundTripper)
}
if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil {
mod, err := ctx.LoadModule(h.LoadBalancing, "SelectionPolicyRaw")
if err != nil {
return fmt.Errorf("loading load balancing selection policy: %s", err)
}
h.LoadBalancing.SelectionPolicy = mod.(Selector)
}
if h.CBRaw != nil {
mod, err := ctx.LoadModule(h, "CBRaw")
if err != nil {
return fmt.Errorf("loading circuit breaker: %s", err)
}
h.CB = mod.(CircuitBreaker)
}
// ensure any embedded headers handler module gets provisioned
// (see https://caddy.community/t/set-cookie-manipulation-in-reverse-proxy/7666?u=matt
// for what happens if we forget to provision it)
if h.Headers != nil {
err := h.Headers.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning embedded headers handler: %v", err)
}
}
// set up transport
if h.Transport == nil {
t := &HTTPTransport{
KeepAlive: &KeepAlive{
ProbeInterval: caddy.Duration(30 * time.Second),
IdleConnTimeout: caddy.Duration(2 * time.Minute),
MaxIdleConnsPerHost: 32,
},
DialTimeout: caddy.Duration(10 * time.Second),
}
err := t.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning default transport: %v", err)
}
h.Transport = t
}
// set up load balancing
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
if h.LoadBalancing.SelectionPolicy == nil {
h.LoadBalancing.SelectionPolicy = RandomSelection{}
}
if h.LoadBalancing.TryDuration > 0 && h.LoadBalancing.TryInterval == 0 {
// a non-zero try_duration with a zero try_interval
// will always spin the CPU for try_duration if the
// upstream is local or low-latency; avoid that by
// defaulting to a sane wait period between attempts
h.LoadBalancing.TryInterval = caddy.Duration(250 * time.Millisecond)
}
lbMatcherSets, err := ctx.LoadModule(h.LoadBalancing, "RetryMatchRaw")
if err != nil {
return err
}
err = h.LoadBalancing.RetryMatch.FromInterface(lbMatcherSets)
if err != nil {
return err
}
// set up upstreams
for _, upstream := range h.Upstreams {
// create or get the host representation for this upstream
var host Host = new(upstreamHost)
existingHost, loaded := hosts.LoadOrStore(upstream.String(), host)
if loaded {
host = existingHost.(Host)
}
upstream.Host = host
// give it the circuit breaker, if any
upstream.cb = h.CB
// if the passive health checker has a non-zero UnhealthyRequestCount
// but the upstream has no MaxRequests set (they are the same thing,
		// but the passive health checker is a default value for upstreams
// without MaxRequests), copy the value into this upstream, since the
// value in the upstream (MaxRequests) is what is used during
// availability checks
if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
h.HealthChecks.Passive.logger = h.logger.Named("health_checker.passive")
if h.HealthChecks.Passive.UnhealthyRequestCount > 0 &&
upstream.MaxRequests == 0 {
upstream.MaxRequests = h.HealthChecks.Passive.UnhealthyRequestCount
}
}
// upstreams need independent access to the passive
// health check policy because passive health checks
// run without access to h.
if h.HealthChecks != nil {
upstream.healthCheckPolicy = h.HealthChecks.Passive
}
}
if h.HealthChecks != nil {
// set defaults on passive health checks, if necessary
if h.HealthChecks.Passive != nil {
if h.HealthChecks.Passive.FailDuration > 0 && h.HealthChecks.Passive.MaxFails == 0 {
h.HealthChecks.Passive.MaxFails = 1
}
}
// if active health checks are enabled, configure them and start a worker
if h.HealthChecks.Active != nil &&
(h.HealthChecks.Active.Path != "" || h.HealthChecks.Active.Port != 0) {
h.HealthChecks.Active.logger = h.logger.Named("health_checker.active")
timeout := time.Duration(h.HealthChecks.Active.Timeout)
if timeout == 0 {
timeout = 5 * time.Second
}
h.HealthChecks.Active.httpClient = &http.Client{
Timeout: timeout,
Transport: h.Transport,
}
for _, upstream := range h.Upstreams {
// if there's an alternative port for health-check provided in the config,
// then use it, otherwise use the port of upstream.
if h.HealthChecks.Active.Port != 0 {
upstream.activeHealthCheckPort = h.HealthChecks.Active.Port
}
}
if h.HealthChecks.Active.Interval == 0 {
h.HealthChecks.Active.Interval = caddy.Duration(30 * time.Second)
}
if h.HealthChecks.Active.ExpectBody != "" {
var err error
h.HealthChecks.Active.bodyRegexp, err = regexp.Compile(h.HealthChecks.Active.ExpectBody)
if err != nil {
return fmt.Errorf("expect_body: compiling regular expression: %v", err)
}
}
go h.activeHealthChecker()
}
}
// set up any response routes
for i, rh := range h.HandleResponse {
err := rh.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning response handler %d: %v", i, err)
}
}
return nil
}
// Cleanup cleans up the resources made by h during provisioning.
func (h *Handler) Cleanup() error {
// TODO: Close keepalive connections on reload? https://github.com/caddyserver/caddy/pull/2507/files#diff-70219fd88fe3f36834f474ce6537ed26R762
// remove hosts from our config from the pool
for _, upstream := range h.Upstreams {
hosts.Delete(upstream.String())
}
return nil
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// if enabled, buffer client request;
// this should only be enabled if the
// upstream requires it and does not
// work with "slow clients" (gunicorn,
// etc.) - this obviously has a perf
// overhead and makes the proxy at
// risk of exhausting memory and more
// susceptible to slowloris attacks,
// so it is strongly recommended to
// only use this feature if absolutely
// required, if read timeouts are set,
// and if body size is limited
if h.BufferRequests {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
io.Copy(buf, r.Body)
r.Body.Close()
r.Body = ioutil.NopCloser(buf)
}
// prepare the request for proxying; this is needed only once
err := h.prepareRequest(r)
if err != nil {
return caddyhttp.Error(http.StatusInternalServerError,
fmt.Errorf("preparing request for upstream round-trip: %v", err))
}
// we will need the original headers and Host value if
// header operations are configured; and we should
// restore them after we're done if they are changed
// (for example, changing the outbound Host header
// should not permanently change r.Host; issue #3509)
reqHost := r.Host
reqHeader := r.Header
defer func() {
r.Host = reqHost
r.Header = reqHeader
}()
start := time.Now()
var proxyErr error
for {
// choose an available upstream
upstream := h.LoadBalancing.SelectionPolicy.Select(h.Upstreams, r)
if upstream == nil {
if proxyErr == nil {
proxyErr = fmt.Errorf("no upstreams available")
}
if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
break
}
continue
}
// the dial address may vary per-request if placeholders are
// used, so perform those replacements here; the resulting
// DialInfo struct should have valid network address syntax
dialInfo, err := upstream.fillDialInfo(r)
if err != nil {
err = fmt.Errorf("making dial info: %v", err)
return caddyhttp.Error(http.StatusBadGateway, err)
}
// attach to the request information about how to dial the upstream;
// this is necessary because the information cannot be sufficiently
// or satisfactorily represented in a URL
caddyhttp.SetVar(r.Context(), dialInfoVarKey, dialInfo)
// set placeholders with information about this upstream
repl.Set("http.reverse_proxy.upstream.address", dialInfo.String())
repl.Set("http.reverse_proxy.upstream.hostport", dialInfo.Address)
repl.Set("http.reverse_proxy.upstream.host", dialInfo.Host)
repl.Set("http.reverse_proxy.upstream.port", dialInfo.Port)
repl.Set("http.reverse_proxy.upstream.requests", upstream.Host.NumRequests())
repl.Set("http.reverse_proxy.upstream.max_requests", upstream.MaxRequests)
repl.Set("http.reverse_proxy.upstream.fails", upstream.Host.Fails())
// mutate request headers according to this upstream;
// because we're in a retry loop, we have to copy
// headers (and the r.Host value) from the original
// so that each retry is identical to the first
if h.Headers != nil && h.Headers.Request != nil {
r.Header = make(http.Header)
copyHeader(r.Header, reqHeader)
r.Host = reqHost
h.Headers.Request.ApplyToRequest(r)
}
// proxy the request to that upstream
proxyErr = h.reverseProxy(w, r, dialInfo, next)
if proxyErr == nil || proxyErr == context.Canceled {
// context.Canceled happens when the downstream client
// cancels the request, which is not our failure
return nil
}
// if the roundtrip was successful, don't retry the request or
// ding the health status of the upstream (an error can still
// occur after the roundtrip if, for example, a response handler
// after the roundtrip returns an error)
if succ, ok := proxyErr.(roundtripSucceeded); ok {
return succ.error
}
// remember this failure (if enabled)
h.countFailure(upstream)
// if we've tried long enough, break
if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) {
break
}
}
return caddyhttp.Error(http.StatusBadGateway, proxyErr)
}
// prepareRequest modifies req so that it is ready to be proxied,
// except for directing to a specific upstream. This method mutates
// headers and other necessary properties of the request and should
// be done just once (before proxying) regardless of proxy retries.
// This assumes that no mutations of the request are performed
// by h during or after proxying.
func (h Handler) prepareRequest(req *http.Request) error {
// most of this is borrowed from the Go std lib reverse proxy
if req.ContentLength == 0 {
req.Body = nil // Issue golang/go#16036: nil Body for http.Transport retries
}
req.Close = false
// if User-Agent is not set by client, then explicitly
// disable it so it's not set to default value by std lib
if _, ok := req.Header["User-Agent"]; !ok {
req.Header.Set("User-Agent", "")
}
reqUpType := upgradeType(req.Header)
removeConnectionHeaders(req.Header)
// Remove hop-by-hop headers to the backend. Especially
// important is "Connection" because we want a persistent
// connection, regardless of what the client sent to us.
for _, h := range hopHeaders {
hv := req.Header.Get(h)
if hv == "" {
continue
}
if h == "Te" && hv == "trailers" {
// Issue golang/go#21096: tell backend applications that
// care about trailer support that we support
// trailers. (We do, but we don't go out of
// our way to advertise that unless the
// incoming client request thought it was
// worth mentioning)
continue
}
req.Header.Del(h)
}
// After stripping all the hop-by-hop connection headers above, add back any
// necessary for protocol upgrades, such as for websockets.
if reqUpType != "" {
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", reqUpType)
}
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
// If we aren't the first proxy retain prior
// X-Forwarded-For information as a comma+space
// separated list and fold multiple headers into one.
if prior, ok := req.Header["X-Forwarded-For"]; ok {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
req.Header.Set("X-Forwarded-For", clientIP)
}
if req.Header.Get("X-Forwarded-Proto") == "" {
// set X-Forwarded-Proto; many backend apps expect this too
proto := "https"
if req.TLS == nil {
proto = "http"
}
req.Header.Set("X-Forwarded-Proto", proto)
}
return nil
}
// reverseProxy performs a round-trip to the given backend and processes the response with the client.
// (This method is mostly the beginning of what was borrowed from the net/http/httputil package in the
// Go standard library which was used as the foundation.)
func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, di DialInfo, next caddyhttp.Handler) error {
di.Upstream.Host.CountRequest(1)
defer di.Upstream.Host.CountRequest(-1)
// point the request to this upstream
h.directRequest(req, di)
// do the round-trip; emit debug log with values we know are
// safe, or if there is no error, emit fuller log entry
start := time.Now()
res, err := h.Transport.RoundTrip(req)
duration := time.Since(start)
logger := h.logger.With(
zap.String("upstream", di.Upstream.String()),
zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: req}),
zap.Duration("duration", duration))
if err != nil {
logger.Debug("upstream roundtrip", zap.Error(err))
return err
}
logger.Debug("upstream roundtrip",
zap.Object("headers", caddyhttp.LoggableHTTPHeader(res.Header)),
zap.Int("status", res.StatusCode))
// update circuit breaker on current conditions
if di.Upstream.cb != nil {
di.Upstream.cb.RecordMetric(res.StatusCode, duration)
}
// perform passive health checks (if enabled)
if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
// strike if the status code matches one that is "bad"
for _, badStatus := range h.HealthChecks.Passive.UnhealthyStatus {
if caddyhttp.StatusCodeMatches(res.StatusCode, badStatus) {
h.countFailure(di.Upstream)
}
}
// strike if the roundtrip took too long
if h.HealthChecks.Passive.UnhealthyLatency > 0 &&
duration >= time.Duration(h.HealthChecks.Passive.UnhealthyLatency) {
h.countFailure(di.Upstream)
}
}
// see if any response handler is configured for this response from the backend
for i, rh := range h.HandleResponse {
if rh.Match != nil && !rh.Match.Match(res.StatusCode, res.Header) {
continue
}
repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// if configured to only change the status code, do that then continue regular proxy response
if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
statusCode, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
if err != nil {
return caddyhttp.Error(http.StatusInternalServerError, err)
}
if statusCode != 0 {
res.StatusCode = statusCode
}
break
}
// otherwise, if there are any routes configured, execute those as the
// actual response instead of what we got from the proxy backend
if len(rh.Routes) == 0 {
continue
}
res.Body.Close()
repl.Set("http.reverse_proxy.status_code", res.StatusCode)
repl.Set("http.reverse_proxy.status_text", res.Status)
h.logger.Debug("handling response", zap.Int("handler", i))
if routeErr := rh.Routes.Compile(next).ServeHTTP(rw, req); routeErr != nil {
// wrap error in roundtripSucceeded so caller knows that
// the roundtrip was successful and to not retry
return roundtripSucceeded{routeErr}
}
}
// Deal with 101 Switching Protocols responses: (WebSocket, h2c, etc)
if res.StatusCode == http.StatusSwitchingProtocols {
h.handleUpgradeResponse(rw, req, res)
return nil
}
removeConnectionHeaders(res.Header)
for _, h := range hopHeaders {
res.Header.Del(h)
}
// apply any response header operations
if h.Headers != nil && h.Headers.Response != nil {
if h.Headers.Response.Require == nil ||
h.Headers.Response.Require.Match(res.StatusCode, res.Header) {
repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
h.Headers.Response.ApplyTo(res.Header, repl)
}
}
copyHeader(rw.Header(), res.Header)
// The "Trailer" header isn't included in the Transport's response,
// at least for *http.Transport. Build it up from Trailer.
announcedTrailers := len(res.Trailer)
if announcedTrailers > 0 {
trailerKeys := make([]string, 0, len(res.Trailer))
for k := range res.Trailer {
trailerKeys = append(trailerKeys, k)
}
rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
}
rw.WriteHeader(res.StatusCode)
// some apps need the response headers before starting to stream content with http2,
// so it's important to explicitly flush the headers to the client before streaming the data.
// (see https://github.com/caddyserver/caddy/issues/3556 for use case and nuances)
if h.isBidirectionalStream(req, res) {
if wf, ok := rw.(http.Flusher); ok {
wf.Flush()
}
}
err = h.copyResponse(rw, res.Body, h.flushInterval(req, res))
res.Body.Close() // close now, instead of defer, to populate res.Trailer
if err != nil {
// we're streaming the response and we've already written headers, so
// there's nothing an error handler can do to recover at this point;
// the standard lib's proxy panics at this point, but we'll just log
// the error and abort the stream here
h.logger.Error("aborting with incomplete response", zap.Error(err))
return nil
}
if len(res.Trailer) > 0 {
// Force chunking if we saw a response trailer.
// This prevents net/http from calculating the length for short
// bodies and adding a Content-Length.
if fl, ok := rw.(http.Flusher); ok {
fl.Flush()
}
}
if len(res.Trailer) == announcedTrailers {
copyHeader(rw.Header(), res.Trailer)
return nil
}
for k, vv := range res.Trailer {
k = http.TrailerPrefix + k
for _, v := range vv {
rw.Header().Add(k, v)
}
}
return nil
}
// tryAgain takes the time that the handler was initially invoked
// as well as any error currently obtained, and the request being
// tried, and returns true if another attempt should be made at
// proxying the request. If true is returned, it has already blocked
// long enough before the next retry (i.e. no more sleeping is
// needed). If false is returned, the handler should stop trying to
// proxy the request.
func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, proxyErr error, req *http.Request) bool {
// if we've tried long enough, break
if time.Since(start) >= time.Duration(lb.TryDuration) {
return false
}
// if the error occurred while dialing (i.e. a connection
// could not even be established to the upstream), then it
// should be safe to retry, since without a connection, no
// HTTP request can be transmitted; but if the error is not
// specifically a dialer error, we need to be careful
if _, ok := proxyErr.(DialError); proxyErr != nil && !ok {
// if the error occurred after a connection was established,
// we have to assume the upstream received the request, and
// retries need to be carefully decided, because some requests
// are not idempotent
if lb.RetryMatch == nil && req.Method != "GET" {
// by default, don't retry requests if they aren't GET
return false
}
if !lb.RetryMatch.AnyMatch(req) {
return false
}
}
// otherwise, wait and try the next available host
select {
case <-time.After(time.Duration(lb.TryInterval)):
return true
case <-ctx.Done():
return false
}
}
// directRequest modifies only req.URL so that it points to the upstream
// in the given DialInfo. It must modify ONLY the request URL.
func (h Handler) directRequest(req *http.Request, di DialInfo) {
// we need a host, so set the upstream's host address
reqHost := di.Address
// if the port equates to the scheme, strip the port because
// it's weird to make a request like http://example.com:80/.
if (req.URL.Scheme == "http" && di.Port == "80") ||
(req.URL.Scheme == "https" && di.Port == "443") {
reqHost = di.Host
}
req.URL.Host = reqHost
}
func copyHeader(dst, src http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
return h2
}
func upgradeType(h http.Header) string {
if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") {
return ""
}
return strings.ToLower(h.Get("Upgrade"))
}
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
switch {
case aslash && bslash:
return a + b[1:]
case !aslash && !bslash:
return a + "/" + b
}
return a + b
}
// removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h.
// See RFC 7230, section 6.1
func removeConnectionHeaders(h http.Header) {
if c := h.Get("Connection"); c != "" {
for _, f := range strings.Split(c, ",") {
if f = strings.TrimSpace(f); f != "" {
h.Del(f)
}
}
}
}
// LoadBalancing has parameters related to load balancing.
type LoadBalancing struct {
// A selection policy is how to choose an available backend.
// The default policy is random selection.
SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
// How long to try selecting available backends for each request
// if the next available host is down. By default, this retry is
// disabled. Clients will wait for up to this long while the load
// balancer tries to find an available upstream host.
TryDuration caddy.Duration `json:"try_duration,omitempty"`
// How long to wait between selecting the next host from the pool. Default
// is 250ms. Only relevant when a request to an upstream host fails. Be
// aware that setting this to 0 with a non-zero try_duration can cause the
// CPU to spin if all backends are down and latency is very low.
TryInterval caddy.Duration `json:"try_interval,omitempty"`
// A list of matcher sets that restricts with which requests retries are
// allowed. A request must match any of the given matcher sets in order
// to be retried if the connection to the upstream succeeded but the
// subsequent round-trip failed. If the connection to the upstream failed,
// a retry is always allowed. If unspecified, only GET requests will be
// allowed to be retried. Note that a retry is done with the next available
// host according to the load balancing policy.
RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`
SelectionPolicy Selector `json:"-"`
RetryMatch caddyhttp.MatcherSets `json:"-"`
}
// Selector selects an available upstream from the pool.
type Selector interface {
Select(UpstreamPool, *http.Request) *Upstream
}
// Hop-by-hop headers. These are removed when sent to the backend.
// As of RFC 7230, hop-by-hop headers are required to appear in the
// Connection header field. These are the headers defined by the
// obsoleted RFC 2616 (section 13.5.1) and are used for backward
// compatibility.
var hopHeaders = []string{
"Alt-Svc",
"Connection",
"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
"Transfer-Encoding",
"Upgrade",
}
// DialError is an error that specifically occurs
// in a call to Dial or DialContext.
type DialError struct{ error }
// TLSTransport is implemented by transports
// that are capable of using TLS.
type TLSTransport interface {
// TLSEnabled returns true if the transport
// has TLS enabled, false otherwise.
TLSEnabled() bool
// EnableTLS enables TLS within the transport
// if it is not already, using the provided
// value as a basis for the TLS config.
EnableTLS(base *TLSConfig) error
}
// roundtripSucceeded is an error type that is returned if the
// roundtrip succeeded, but an error occurred after-the-fact.
type roundtripSucceeded struct{ error }
var bufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
// Interface guards
var (
_ caddy.Provisioner = (*Handler)(nil)
_ caddy.CleanerUpper = (*Handler)(nil)
_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
)
| 1 | 15,855 | Returning here bypasses all the health check and load balancing features. | caddyserver-caddy | go |
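One loose reading of the comment, sketched here rather than taken from the actual Caddy change: leave the retry loop and failure counting alone, and only distinguish a timeout when the final error is converted into a status code after the loop gives up.

package example

import (
	"net"
	"net/http"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// statusForProxyErr maps the last proxy error to an HTTP error only after
// load balancing retries and passive health checks have had their chance.
func statusForProxyErr(proxyErr error) error {
	if netErr, ok := proxyErr.(net.Error); ok && netErr.Timeout() {
		return caddyhttp.Error(http.StatusGatewayTimeout, proxyErr)
	}
	return caddyhttp.Error(http.StatusBadGateway, proxyErr)
}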
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Tigera, Inc. All rights reserved.
+// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2020 All rights reserved.
package ut_test
import (
"os"
"testing"
)
func TestMain(m *testing.M) {
initMapsOnce()
cleanUpMaps()
rc := m.Run()
cleanUpMaps()
os.Exit(rc)
}
| 1 | 19,323 | Should revert this copyright change, since the file isn't changing in any other way. | projectcalico-felix | go |
@@ -90,8 +90,10 @@ func WriteFile(fromFile io.Reader, to string, mode os.FileMode) error {
return err
}
dir, file := path.Split(to)
- if err := os.MkdirAll(dir, DirPermissions); err != nil {
- return err
+ if dir != "" {
+ if err := os.MkdirAll(dir, DirPermissions); err != nil {
+ return err
+ }
}
tempFile, err := ioutil.TempFile(dir, file)
if err != nil { | 1 | // Package fs provides various filesystem helpers.
package fs
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"syscall"
"gopkg.in/op/go-logging.v1"
)
var log = logging.MustGetLogger("fs")
// DirPermissions are the default permission bits we apply to directories.
const DirPermissions = os.ModeDir | 0775
// EnsureDir ensures that the directory of the given file has been created.
func EnsureDir(filename string) error {
dir := path.Dir(filename)
err := os.MkdirAll(dir, DirPermissions)
if err != nil && FileExists(dir) {
// It looks like this is a file and not a directory. Attempt to remove it; this can
// happen in some cases if you change a rule from outputting a file to a directory.
log.Warning("Attempting to remove file %s; a subdirectory is required", dir)
if err2 := os.Remove(dir); err2 == nil {
err = os.MkdirAll(dir, DirPermissions)
} else {
log.Error("%s", err2)
}
}
return err
}
// PathExists returns true if the given path exists, as a file or a directory.
func PathExists(filename string) bool {
_, err := os.Lstat(filename)
return err == nil
}
// FileExists returns true if the given path exists and is a file.
func FileExists(filename string) bool {
info, err := os.Lstat(filename)
return err == nil && !info.IsDir()
}
// IsSymlink returns true if the given path exists and is a symlink.
func IsSymlink(filename string) bool {
info, err := os.Lstat(filename)
return err == nil && (info.Mode()&os.ModeSymlink) != 0
}
// IsSameFile returns true if two filenames describe the same underlying file (i.e. inode)
func IsSameFile(a, b string) bool {
i1, err1 := getInode(a)
i2, err2 := getInode(b)
return err1 == nil && err2 == nil && i1 == i2
}
// getInode returns the inode of a file.
func getInode(filename string) (uint64, error) {
fi, err := os.Stat(filename)
if err != nil {
return 0, err
}
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("Not a syscall.Stat_t")
}
return s.Ino, nil
}
// CopyFile copies a file from 'from' to 'to', with an attempt to perform a copy & rename
// to avoid chaos if anything goes wrong partway.
func CopyFile(from string, to string, mode os.FileMode) error {
fromFile, err := os.Open(from)
if err != nil {
return err
}
defer fromFile.Close()
return WriteFile(fromFile, to, mode)
}
// WriteFile writes data from a reader to the file named 'to', with an attempt to perform
// a copy & rename to avoid chaos if anything goes wrong partway.
func WriteFile(fromFile io.Reader, to string, mode os.FileMode) error {
if err := os.RemoveAll(to); err != nil {
return err
}
dir, file := path.Split(to)
if err := os.MkdirAll(dir, DirPermissions); err != nil {
return err
}
tempFile, err := ioutil.TempFile(dir, file)
if err != nil {
return err
}
if _, err := io.Copy(tempFile, fromFile); err != nil {
return err
}
if err := tempFile.Close(); err != nil {
return err
}
// OK, now file is written; adjust permissions appropriately.
if mode == 0 {
mode = 0664
}
if err := os.Chmod(tempFile.Name(), mode); err != nil {
return err
}
// And move it to its final destination.
return os.Rename(tempFile.Name(), to)
}
// IsDirectory checks if a given path is a directory
func IsDirectory(path string) bool {
info, err := os.Stat(path)
return err == nil && info.IsDir()
}
// IsPackage returns true if the given directory name is a package (i.e. contains a build file)
func IsPackage(buildFileNames []string, name string) bool {
for _, buildFileName := range buildFileNames {
if FileExists(path.Join(name, buildFileName)) {
return true
}
}
return false
}
| 1 | 9,394 | Interesting that filepath.Dir("thing") returns "." whereas this returns "" | thought-machine-please | go |
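A quick standalone check of the behaviour being pointed out (plain Go, independent of the project): path.Split leaves the directory empty for a bare filename, while filepath.Dir normalises it to ".", which is why the added dir != "" guard matters when using path.Split.

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	dir, file := path.Split("thing")
	fmt.Printf("path.Split:   dir=%q file=%q\n", dir, file) // dir="" file="thing"
	fmt.Printf("filepath.Dir: %q\n", filepath.Dir("thing")) // "."
}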
@@ -364,11 +364,11 @@ class Series(_Frame):
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
- >>> s1.corr(s2, method='pearson')
- -0.8510644963469898
+ >>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS
+ -0.851064...
- >>> s1.corr(s2, method='spearman')
- -0.9486832980505125
+ >>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS
+ -0.948683...
Notes
----- | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similar to pandas Series.
"""
from decorator import decorator, dispatch_on
from functools import partial
import numpy as np
import pandas as pd
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import FloatType, DoubleType, LongType, StructType, TimestampType, \
to_arrow_type
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.dask.utils import derived_from
from databricks.koalas.frame import DataFrame
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.selection import SparkDataFrameLocator
@decorator
def _column_op(f, self, *args):
"""
A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas
Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: Koalas Series
:param args: arguments that the function `f` takes.
"""
assert all((not isinstance(arg, Series)) or (arg._kdf is self._kdf) for arg in args), \
"Cannot combine column argument because it comes from a different dataframe"
# It is possible for the function `f` takes other arguments than Spark Column.
# To cover this case, explicitly check if the argument is Koalas Series and
# extract Spark Column. For other arguments, they are used as are.
args = [arg._scol if isinstance(arg, Series) else arg for arg in args]
scol = f(self._scol, *args)
return Series(scol, self._kdf, self._index_info)
@decorator
def _numpy_column_op(f, self, *args):
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, 's')))
else:
new_args.append(arg)
return _column_op(f)(self, *new_args)
class Series(_Frame):
"""
Koala Series that corresponds to Pandas Series logically. This holds Spark Column
internally.
:ivar _scol: Spark Column instance
:ivar _kdf: Parent's Koalas DataFrame
:ivar _index_info: Each pair holds the index field name which exists in Spark fields,
and the index name.
"""
@derived_from(pd.Series)
@dispatch_on('data')
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
s = pd.Series(data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath)
self._init_from_pandas(s)
@__init__.register(pd.Series)
def _init_from_pandas(self, s, *args):
"""
Creates Koalas Series from Pandas Series.
:param s: Pandas Series
"""
kdf = DataFrame(pd.DataFrame(s))
self._init_from_spark(kdf._sdf[kdf._metadata.column_fields[0]],
kdf, kdf._metadata.index_info)
@__init__.register(spark.Column)
def _init_from_spark(self, scol, kdf, index_info, *args):
"""
Creates Koalas Series from Spark Column.
:param scol: Spark Column
:param kdf: Koalas DataFrame that should have the `scol`.
:param index_info: index information of this Series.
"""
assert index_info is not None
self._scol = scol
self._kdf = kdf
self._index_info = index_info
# arithmetic operators
__neg__ = _column_op(spark.Column.__neg__)
__add__ = _column_op(spark.Column.__add__)
def __sub__(self, other):
# Note that timestamp subtraction casts arguments to integer. This is to mimic Pandas's
# behaviors. Pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
if isinstance(other, Series) and isinstance(self.spark_type, TimestampType):
if not isinstance(other.spark_type, TimestampType):
raise TypeError('datetime subtraction can only be applied to datetime series.')
return self.astype('bigint') - other.astype('bigint')
else:
return _column_op(spark.Column.__sub__)(self, other)
__mul__ = _column_op(spark.Column.__mul__)
__div__ = _numpy_column_op(spark.Column.__div__)
__truediv__ = _numpy_column_op(spark.Column.__truediv__)
__mod__ = _column_op(spark.Column.__mod__)
__radd__ = _column_op(spark.Column.__radd__)
__rsub__ = _column_op(spark.Column.__rsub__)
__rmul__ = _column_op(spark.Column.__rmul__)
__rdiv__ = _numpy_column_op(spark.Column.__rdiv__)
__rtruediv__ = _numpy_column_op(spark.Column.__rtruediv__)
__rmod__ = _column_op(spark.Column.__rmod__)
__pow__ = _column_op(spark.Column.__pow__)
__rpow__ = _column_op(spark.Column.__rpow__)
# logistic operators
__eq__ = _column_op(spark.Column.__eq__)
__ne__ = _column_op(spark.Column.__ne__)
__lt__ = _column_op(spark.Column.__lt__)
__le__ = _column_op(spark.Column.__le__)
__ge__ = _column_op(spark.Column.__ge__)
__gt__ = _column_op(spark.Column.__gt__)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _column_op(spark.Column.__and__)
__or__ = _column_op(spark.Column.__or__)
__invert__ = _column_op(spark.Column.__invert__)
__rand__ = _column_op(spark.Column.__rand__)
__ror__ = _column_op(spark.Column.__ror__)
@property
def dtype(self):
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ks.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ks.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ks.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
"""
if type(self.spark_type) == TimestampType:
return np.dtype('datetime64[ns]')
else:
return np.dtype(to_arrow_type(self.spark_type).to_pandas_dtype())
@property
def spark_type(self):
""" Returns the data type as defined by Spark, as a Spark DataType object."""
return self.schema.fields[-1].dataType
def astype(self, dtype):
from databricks.koalas.typedef import as_spark_type
spark_type = as_spark_type(dtype)
if not spark_type:
raise ValueError("Type {} not understood".format(dtype))
return Series(self._scol.cast(spark_type), self._kdf, self._index_info)
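    # Illustrative sketch (not part of the original module): `astype` maps a NumPy/Python dtype
    # to a Spark type via `as_spark_type` and simply casts the underlying column, roughly
    #
    #     ks.Series([1, 2, 3]).astype('float')   # wraps something like self._scol.cast(DoubleType())
    #
    # A dtype that `as_spark_type` does not recognize raises ValueError("Type ... not understood").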
def getField(self, name):
if not isinstance(self.schema, StructType):
raise AttributeError("Not a struct: {}".format(self.schema))
else:
fnames = self.schema.fieldNames()
if name not in fnames:
raise AttributeError(
"Field {} not found, possible values are {}".format(name, ", ".join(fnames)))
return Series(self._scol.getField(name), self._kdf, self._index_info)
# TODO: automate the process here
def alias(self, name):
return self.rename(name)
@property
def schema(self):
return self.to_dataframe()._sdf.schema
@property
def shape(self):
"""Return a tuple of the shape of the underlying data."""
return len(self),
@property
def name(self):
return self._metadata.column_fields[0]
@name.setter
def name(self, name):
self.rename(name, inplace=True)
@derived_from(pd.Series)
def rename(self, index=None, **kwargs):
if index is None:
return self
scol = self._scol.alias(index)
if kwargs.get('inplace', False):
self._scol = scol
return self
else:
return Series(scol, self._kdf, self._index_info)
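    # Illustrative sketch (not part of the original module): `rename` only aliases the underlying
    # Spark column, so
    #
    #     s.rename('x')                 # returns a new Series whose column is aliased to 'x'
    #     s.rename('x', inplace=True)   # swaps the aliased column into `self` and returns self
    #
    # Calling it without an index argument is a no-op that returns the same Series.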
@property
def _metadata(self):
return self.to_dataframe()._metadata
@property
def index(self):
"""The index (axis labels) Column of the Series.
Currently supported only when the DataFrame has a single index.
"""
if len(self._metadata.index_info) != 1:
raise KeyError('Currently supported only when the Column has a single index.')
return self._kdf.index
@derived_from(pd.Series)
def reset_index(self, level=None, drop=False, name=None, inplace=False):
if inplace and not drop:
raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame')
if name is not None:
kdf = self.rename(name).to_dataframe()
else:
kdf = self.to_dataframe()
kdf = kdf.reset_index(level=level, drop=drop)
if drop:
s = _col(kdf)
if inplace:
self._kdf = kdf
self._scol = s._scol
self._index_info = s._index_info
else:
return s
else:
return kdf
@property
def loc(self):
return SparkDataFrameLocator(self)
def to_dataframe(self):
sdf = self._kdf._sdf.select([field for field, _ in self._index_info] + [self._scol])
metadata = Metadata(column_fields=[sdf.schema[-1].name], index_info=self._index_info)
return DataFrame(sdf, metadata)
def toPandas(self):
return _col(self.to_dataframe().toPandas())
@derived_from(pd.Series)
def isnull(self):
if isinstance(self.schema[self.name].dataType, (FloatType, DoubleType)):
return Series(self._scol.isNull() | F.isnan(self._scol), self._kdf, self._index_info)
else:
return Series(self._scol.isNull(), self._kdf, self._index_info)
isna = isnull
@derived_from(pd.Series)
def notnull(self):
return ~self.isnull()
notna = notnull
@derived_from(pd.Series)
def dropna(self, axis=0, inplace=False, **kwargs):
ks = _col(self.to_dataframe().dropna(axis=axis, inplace=False))
if inplace:
self._kdf = ks._kdf
self._scol = ks._scol
else:
return ks
@derived_from(DataFrame)
def head(self, n=5):
return _col(self.to_dataframe().head(n))
def unique(self):
# Pandas wants a series/array-like object
return _col(self.to_dataframe().unique())
@derived_from(pd.Series)
def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._kdf._sdf.filter(self.notna()._scol)
else:
sdf_dropna = self._kdf._sdf
sdf = sdf_dropna.groupby(self._scol).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col('count'))
else:
sdf = sdf.orderBy(F.col('count').desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn('count', F.col('count') / F.lit(sum))
index_name = 'index' if self.name != 'index' else 'level_0'
kdf = DataFrame(sdf)
kdf.columns = [index_name, self.name]
kdf._metadata = Metadata(column_fields=[self.name], index_info=[(index_name, None)])
return _col(kdf)
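    # Illustrative sketch (not part of the original module): `value_counts` is a groupby-count on
    # the Spark side, so conceptually
    #
    #     ks.Series(['a', 'b', 'a']).value_counts()
    #
    # filters out nulls (when dropna=True), runs `sdf.groupby(col).count()`, optionally sorts by
    # the count column, and with normalize=True divides each count by the non-null row count.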
def corr(self, other, method='pearson'):
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ks.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson')
-0.8510644963469898
>>> s1.corr(s2, method='spearman')
-0.9486832980505125
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
df = self._kdf.assign(corr_arg1=self, corr_arg2=other)[["corr_arg1", "corr_arg2"]]
c = df.corr(method=method)
return c.loc["corr_arg1", "corr_arg2"]
def count(self):
"""
Return number of non-NA/null observations in the Series.
Returns
-------
nobs : int
"""
return self._reduce_for_stat_function(F.count)
def _reduce_for_stat_function(self, sfun):
return _unpack_scalar(self._kdf._sdf.select(sfun(self._scol)))
def __len__(self):
return len(self.to_dataframe())
def __getitem__(self, key):
return Series(self._scol.__getitem__(key), self._kdf, self._index_info)
def __getattr__(self, item):
if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"):
raise AttributeError(item)
if hasattr(_MissingPandasLikeSeries, item):
return partial(getattr(_MissingPandasLikeSeries, item), self)
return self.getField(item)
def __str__(self):
return self._pandas_orig_repr()
def __repr__(self):
return repr(self.head(max_display_count).toPandas())
def __dir__(self):
if not isinstance(self.schema, StructType):
fields = []
else:
fields = [f for f in self.schema.fieldNames() if ' ' not in f]
return super(Series, self).__dir__() + fields
def _pandas_orig_repr(self):
# TODO: figure out how to reuse the original one.
return 'Column<%s>' % self._scol._jc.toString().encode('utf8')
def _unpack_scalar(sdf):
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
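# Illustrative sketch (not part of the original module): `_unpack_scalar` is the tail end of
# `Series._reduce_for_stat_function`. For example, `Series.count()` builds a one-row, one-column
# Spark DataFrame via `sdf.select(F.count(scol))`, and `_unpack_scalar` pulls out that single
# value; the asserts guard against anything other than exactly one row and one column.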
def _col(df):
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
return df[df.columns[0]]
| 1 | 8,568 | Nice, I did not know about that | databricks-koalas | py |
@@ -2521,4 +2521,19 @@ namespace hip_impl {
std::terminate();
#endif
}
+
+ std::mutex executables_cache_mutex;
+
+ void executables_cache(
+ std::string elf, hsa_isa_t isa, hsa_agent_t agent,
+ std::vector<hsa_executable_t>& exes, bool write) {
+ static std::unordered_map<std::string,
+ std::unordered_map<hsa_isa_t,
+ std::unordered_map<hsa_agent_t, std::vector<hsa_executable_t>>>> cache;
+ if (write) {
+ cache[elf][isa][agent] = exes;
+ } else {
+ exes = cache[elf][isa][agent];
+ }
+ }
} // Namespace hip_impl. | 1 | /*
Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/**
* @file hip_hcc.cpp
*
* Contains definitions for functions that are large enough that we don't want to inline them
* everywhere. This file is compiled and linked into apps running HIP / HCC path.
*/
#include <assert.h>
#include <exception>
#include <stdint.h>
#include <iostream>
#include <sstream>
#include <list>
#include <sys/types.h>
#include <unistd.h>
#include <deque>
#include <vector>
#include <algorithm>
#include <atomic>
#include <mutex>
#include <hc.hpp>
#include <hc_am.hpp>
#include "hsa/hsa_ext_amd.h"
#include "hip/hip_runtime.h"
#include "hip_hcc_internal.h"
#include "trace_helper.h"
#include "env.h"
// TODO - create a stream-based debug interface as an additional option for tprintf
#define DB_PEER_CTX 0
//=================================================================================================
// Global variables:
//=================================================================================================
const int release = 1;
const char* API_COLOR = KGRN;
const char* API_COLOR_END = KNRM;
int HIP_LAUNCH_BLOCKING = 0;
std::string HIP_LAUNCH_BLOCKING_KERNELS;
std::vector<std::string> g_hipLaunchBlockingKernels;
int HIP_API_BLOCKING = 0;
int HIP_PRINT_ENV = 0;
int HIP_TRACE_API = 0;
std::string HIP_TRACE_API_COLOR("green");
int HIP_PROFILE_API = 0;
// TODO - DB_START/STOP need more testing.
std::string HIP_DB_START_API;
std::string HIP_DB_STOP_API;
int HIP_DB = 0;
int HIP_VISIBLE_DEVICES = 0;
int HIP_WAIT_MODE = 0;
int HIP_FORCE_P2P_HOST = 0;
int HIP_FAIL_SOC = 0;
int HIP_DENY_PEER_ACCESS = 0;
int HIP_HIDDEN_FREE_MEM = 256;
// Force async copies to actually use the synchronous copy interface.
int HIP_FORCE_SYNC_COPY = 0;
// TODO - set these to 0 and 1
int HIP_EVENT_SYS_RELEASE = 0;
int HIP_HOST_COHERENT = 1;
int HIP_SYNC_HOST_ALLOC = 1;
int HIP_INIT_ALLOC = -1;
int HIP_SYNC_STREAM_WAIT = 0;
int HIP_FORCE_NULL_STREAM = 0;
int HIP_DUMP_CODE_OBJECT = 0;
#if (__hcc_workweek__ >= 17300)
// Make sure we have required bug fix in HCC
// Perform resolution on the GPU:
// Chicken bit to sync on host to implement null stream.
// If 0, null stream synchronization is performed on the GPU
int HIP_SYNC_NULL_STREAM = 0;
#else
int HIP_SYNC_NULL_STREAM = 1;
#endif
// HIP needs to change some behavior based on HCC_OPT_FLUSH :
#if (__hcc_workweek__ >= 17296)
int HCC_OPT_FLUSH = 1;
#else
#warning "HIP disabled HCC_OPT_FLUSH since HCC version does not yet support"
int HCC_OPT_FLUSH = 0;
#endif
// Array of pointers to devices.
ihipDevice_t** g_deviceArray;
bool g_visible_device = false;
unsigned g_deviceCnt;
std::vector<int> g_hip_visible_devices;
hsa_agent_t g_cpu_agent;
hsa_agent_t* g_allAgents; // CPU agents + all the visible GPU agents.
unsigned g_numLogicalThreads;
std::atomic<int> g_lastShortTid(1);
// Indexed by short-tid:
//
std::vector<ProfTrigger> g_dbStartTriggers;
std::vector<ProfTrigger> g_dbStopTriggers;
//=================================================================================================
// Thread-local storage:
//=================================================================================================
// This is the implicit context used by all HIP commands.
// It can be set by hipSetDevice or by the CTX manipulation commands:
thread_local hipError_t tls_lastHipError = hipSuccess;
thread_local TidInfo tls_tidInfo;
//=================================================================================================
// Top-level "free" functions:
//=================================================================================================
uint64_t recordApiTrace(std::string* fullStr, const std::string& apiStr) {
auto apiSeqNum = tls_tidInfo.apiSeqNum();
auto tid = tls_tidInfo.tid();
if ((tid < g_dbStartTriggers.size()) && (apiSeqNum >= g_dbStartTriggers[tid].nextTrigger())) {
printf("info: resume profiling at %lu\n", apiSeqNum);
RESUME_PROFILING;
g_dbStartTriggers.pop_back();
};
if ((tid < g_dbStopTriggers.size()) && (apiSeqNum >= g_dbStopTriggers[tid].nextTrigger())) {
printf("info: stop profiling at %lu\n", apiSeqNum);
STOP_PROFILING;
g_dbStopTriggers.pop_back();
};
fullStr->reserve(16 + apiStr.length());
*fullStr = std::to_string(tid) + ".";
*fullStr += std::to_string(apiSeqNum);
*fullStr += " ";
*fullStr += apiStr;
uint64_t apiStartTick = getTicks();
if (COMPILE_HIP_DB && HIP_TRACE_API) {
fprintf(stderr, "%s<<hip-api pid:%d tid:%s @%lu%s\n", API_COLOR, tls_tidInfo.pid(), fullStr->c_str(), apiStartTick,
API_COLOR_END);
}
return apiStartTick;
}
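// Illustrative sketch (not part of the original source): for short-tid 3, API sequence number 42
// and apiStr "hipMemcpy (...)", recordApiTrace builds fullStr as "3.42 hipMemcpy (...)" and, when
// HIP_TRACE_API is enabled, prints a line of the form
//   <<hip-api pid:<pid> tid:3.42 hipMemcpy (...) @<tick>
// to stderr before returning the start tick used later for timing.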
static inline bool ihipIsValidDevice(unsigned deviceIndex) {
// deviceIndex is unsigned so always > 0
return (deviceIndex < g_deviceCnt);
}
ihipDevice_t* ihipGetDevice(int deviceIndex) {
if (ihipIsValidDevice(deviceIndex)) {
return g_deviceArray[deviceIndex];
} else {
return NULL;
}
}
ihipCtx_t* ihipGetPrimaryCtx(unsigned deviceIndex) {
ihipDevice_t* device = ihipGetDevice(deviceIndex);
return device ? device->getPrimaryCtx() : NULL;
};
static thread_local ihipCtx_t* tls_defaultCtx = nullptr;
void ihipSetTlsDefaultCtx(ihipCtx_t* ctx) { tls_defaultCtx = ctx; }
//---
// TODO - review the context creation strategy here. Really should be:
// - first "non-device" runtime call creates the context for this thread. Allowed to call
// setDevice first.
// - hipDeviceReset destroys the primary context for device?
// - Then context is created again for next usage.
ihipCtx_t* ihipGetTlsDefaultCtx() {
// Per-thread initialization of the TLS:
if ((tls_defaultCtx == nullptr) && (g_deviceCnt > 0)) {
ihipSetTlsDefaultCtx(ihipGetPrimaryCtx(0));
}
return tls_defaultCtx;
}
hipError_t ihipSynchronize(void) {
ihipGetTlsDefaultCtx()->locked_waitAllStreams(); // ignores non-blocking streams, this waits
// for all activity to finish.
return (hipSuccess);
}
//=================================================================================================
// ihipStream_t:
//=================================================================================================
TidInfo::TidInfo() : _apiSeqNum(0) {
_shortTid = g_lastShortTid.fetch_add(1);
_pid = getpid();
if (COMPILE_HIP_DB && HIP_TRACE_API) {
std::stringstream tid_ss;
std::stringstream tid_ss_num;
tid_ss_num << std::this_thread::get_id();
tid_ss << std::hex << std::stoull(tid_ss_num.str());
tprintf(DB_API, "HIP initialized short_tid#%d (maps to full_tid: 0x%s)\n", _shortTid,
tid_ss.str().c_str());
};
}
//=================================================================================================
// ihipStream_t:
//=================================================================================================
//---
ihipStream_t::ihipStream_t(ihipCtx_t* ctx, hc::accelerator_view av, unsigned int flags)
: _id(0), // will be set by add function.
_flags(flags),
_ctx(ctx),
_criticalData(this, av) {
unsigned schedBits = ctx->_ctxFlags & hipDeviceScheduleMask;
switch (schedBits) {
case hipDeviceScheduleAuto:
_scheduleMode = Auto;
break;
case hipDeviceScheduleSpin:
_scheduleMode = Spin;
break;
case hipDeviceScheduleYield:
_scheduleMode = Yield;
break;
case hipDeviceScheduleBlockingSync:
_scheduleMode = Yield;
break;
default:
_scheduleMode = Auto;
};
};
//---
ihipStream_t::~ihipStream_t() {}
hc::hcWaitMode ihipStream_t::waitMode() const {
hc::hcWaitMode waitMode = hc::hcWaitModeActive;
if (_scheduleMode == Auto) {
if (g_deviceCnt > g_numLogicalThreads) {
waitMode = hc::hcWaitModeActive;
} else {
waitMode = hc::hcWaitModeBlocked;
}
} else if (_scheduleMode == Spin) {
waitMode = hc::hcWaitModeActive;
} else if (_scheduleMode == Yield) {
waitMode = hc::hcWaitModeBlocked;
} else {
assert(0); // bad wait mode.
}
if (HIP_WAIT_MODE == 1) {
waitMode = hc::hcWaitModeBlocked;
} else if (HIP_WAIT_MODE == 2) {
waitMode = hc::hcWaitModeActive;
}
return waitMode;
}
// Wait for all kernel and data copy commands in this stream to complete.
// This signature should be used in routines that already have locked the stream mutex
void ihipStream_t::wait(LockedAccessor_StreamCrit_t& crit) {
tprintf(DB_SYNC, "%s wait for queue-empty..\n", ToString(this).c_str());
crit->_av.wait(waitMode());
crit->_kernelCnt = 0;
}
//---
// Wait for all kernel and data copy commands in this stream to complete.
void ihipStream_t::locked_wait() {
LockedAccessor_StreamCrit_t crit(_criticalData);
wait(crit);
};
// Causes current stream to wait for specified event to complete:
// Note this does not provide any kind of host serialization.
void ihipStream_t::locked_streamWaitEvent(ihipEventData_t& ecd) {
LockedAccessor_StreamCrit_t crit(_criticalData);
crit->_av.create_blocking_marker(ecd.marker(), hc::accelerator_scope);
}
// Causes current stream to wait for specified event to complete:
// Note this does not provide any kind of host serialization.
bool ihipStream_t::locked_eventIsReady(hipEvent_t event) {
// Event query that returns "Complete" may cause HCC to manipulate
// internal queue state so lock the stream's queue here.
LockedAccessor_StreamCrit_t scrit(_criticalData);
LockedAccessor_EventCrit_t ecrit(event->criticalData());
return (ecrit->_eventData.marker().is_ready());
}
// Waiting on event can cause HCC to reclaim stream resources - so need to lock the stream.
void ihipStream_t::locked_eventWaitComplete(hc::completion_future& marker,
hc::hcWaitMode waitMode) {
LockedAccessor_StreamCrit_t crit(_criticalData);
marker.wait(waitMode);
}
// Create a marker in this stream.
// Save state in the event so it can track the status of the event.
hc::completion_future ihipStream_t::locked_recordEvent(hipEvent_t event) {
// Lock the stream to prevent simultaneous access
LockedAccessor_StreamCrit_t crit(_criticalData);
auto scopeFlag = hc::accelerator_scope;
// The env var HIP_EVENT_SYS_RELEASE sets the default,
// The explicit flags override the env var (if specified)
if (event->_flags & hipEventReleaseToSystem) {
scopeFlag = hc::system_scope;
} else if (event->_flags & hipEventReleaseToDevice) {
scopeFlag = hc::accelerator_scope;
} else {
scopeFlag = HIP_EVENT_SYS_RELEASE ? hc::system_scope : hc::accelerator_scope;
}
return crit->_av.create_marker(scopeFlag);
};
//=============================================================================
//-------------------------------------------------------------------------------------------------
//---
const ihipDevice_t* ihipStream_t::getDevice() const { return _ctx->getDevice(); };
ihipCtx_t* ihipStream_t::getCtx() const { return _ctx; };
//--
// Lock the stream to prevent other threads from intervening.
LockedAccessor_StreamCrit_t ihipStream_t::lockopen_preKernelCommand() {
LockedAccessor_StreamCrit_t crit(_criticalData, false /*no unlock at destruction*/);
return crit;
}
//---
// Must be called after kernel finishes, this releases the lock on the stream so other commands can
// submit.
void ihipStream_t::lockclose_postKernelCommand(const char* kernelName, hc::accelerator_view* av) {
bool blockThisKernel = false;
if (!g_hipLaunchBlockingKernels.empty()) {
std::string kernelNameString(kernelName);
for (auto o = g_hipLaunchBlockingKernels.begin(); o != g_hipLaunchBlockingKernels.end();
o++) {
if ((*o == kernelNameString)) {
// printf ("force blocking for kernel %s\n", o->c_str());
blockThisKernel = true;
}
}
}
if (HIP_LAUNCH_BLOCKING || blockThisKernel) {
        // TODO - fix this so it goes through the proper stream::wait() call.
        // A direct wait is OK here since we know the stream is locked.
av->wait(hc::hcWaitModeActive);
tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for kernel '%s' completion\n", ToString(this).c_str(),
kernelName);
}
_criticalData.unlock(); // paired with lock from lockopen_preKernelCommand.
};
//=============================================================================
// Recompute the peercnt and the packed _peerAgents whenever a peer is added or deleted.
// The packed _peerAgents can efficiently be used on each memory allocation.
template <>
void ihipCtxCriticalBase_t<CtxMutex>::recomputePeerAgents() {
_peerCnt = 0;
std::for_each(_peers.begin(), _peers.end(), [this](ihipCtx_t* ctx) {
_peerAgents[_peerCnt++] = ctx->getDevice()->_hsaAgent;
});
}
template <>
bool ihipCtxCriticalBase_t<CtxMutex>::isPeerWatcher(const ihipCtx_t* peer) {
auto match = std::find_if(_peers.begin(), _peers.end(), [=](const ihipCtx_t* d) {
return d->getDeviceNum() == peer->getDeviceNum();
});
return (match != std::end(_peers));
}
template <>
bool ihipCtxCriticalBase_t<CtxMutex>::addPeerWatcher(const ihipCtx_t* thisCtx,
ihipCtx_t* peerWatcher) {
auto match = std::find(_peers.begin(), _peers.end(), peerWatcher);
if (match == std::end(_peers)) {
// Not already a peer, let's update the list:
tprintf(DB_COPY, "addPeerWatcher. Allocations on %s now visible to peerWatcher %s.\n",
thisCtx->toString().c_str(), peerWatcher->toString().c_str());
_peers.push_back(peerWatcher);
recomputePeerAgents();
return true;
}
// If we get here - peer was already on list, silently ignore.
return false;
}
template <>
bool ihipCtxCriticalBase_t<CtxMutex>::removePeerWatcher(const ihipCtx_t* thisCtx,
ihipCtx_t* peerWatcher) {
auto match = std::find(_peers.begin(), _peers.end(), peerWatcher);
if (match != std::end(_peers)) {
// Found a valid peer, let's remove it.
tprintf(
DB_COPY,
"removePeerWatcher. Allocations on %s no longer visible to former peerWatcher %s.\n",
thisCtx->toString().c_str(), peerWatcher->toString().c_str());
_peers.remove(peerWatcher);
recomputePeerAgents();
return true;
} else {
return false;
}
}
template <>
void ihipCtxCriticalBase_t<CtxMutex>::resetPeerWatchers(ihipCtx_t* thisCtx) {
tprintf(DB_COPY, "resetPeerWatchers for context=%s\n", thisCtx->toString().c_str());
_peers.clear();
_peerCnt = 0;
addPeerWatcher(thisCtx, thisCtx); // peer-list always contains self agent.
}
template <>
void ihipCtxCriticalBase_t<CtxMutex>::printPeerWatchers(FILE* f) const {
for (auto iter = _peers.begin(); iter != _peers.end(); iter++) {
fprintf(f, "%s ", (*iter)->toString().c_str());
};
}
template <>
void ihipCtxCriticalBase_t<CtxMutex>::addStream(ihipStream_t* stream) {
stream->_id = _streams.size();
_streams.push_back(stream);
tprintf(DB_SYNC, " addStream: %s\n", ToString(stream).c_str());
}
template <>
void ihipDeviceCriticalBase_t<DeviceMutex>::addContext(ihipCtx_t* ctx) {
_ctxs.push_back(ctx);
tprintf(DB_SYNC, " addContext: %s\n", ToString(ctx).c_str());
}
//=============================================================================
//=================================================================================================
// ihipDevice_t
//=================================================================================================
ihipDevice_t::ihipDevice_t(unsigned deviceId, unsigned deviceCnt, hc::accelerator& acc)
: _deviceId(deviceId), _acc(acc), _state(0), _criticalData(this) {
hsa_agent_t* agent = static_cast<hsa_agent_t*>(acc.get_hsa_agent());
if (agent) {
int err = hsa_agent_get_info(
*agent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT, &_computeUnits);
if (err != HSA_STATUS_SUCCESS) {
_computeUnits = 1;
}
_hsaAgent = *agent;
} else {
_hsaAgent.handle = static_cast<uint64_t>(-1);
}
initProperties(&_props);
_primaryCtx = new ihipCtx_t(this, deviceCnt, hipDeviceMapHost);
}
ihipDevice_t::~ihipDevice_t() {
delete _primaryCtx;
_primaryCtx = NULL;
}
void ihipDevice_t::locked_removeContext(ihipCtx_t* c) {
LockedAccessor_DeviceCrit_t crit(_criticalData);
crit->ctxs().remove(c);
tprintf(DB_SYNC, " locked_removeContext: %s\n", ToString(c).c_str());
}
void ihipDevice_t::locked_reset() {
// Obtain mutex access to the device critical data, release by destructor
LockedAccessor_DeviceCrit_t crit(_criticalData);
//---
// Wait for pending activity to complete? TODO - check if this is required behavior:
tprintf(DB_SYNC, "locked_reset waiting for activity to complete.\n");
// Reset and remove streams:
// Delete all created streams including the default one.
for (auto ctxI = crit->const_ctxs().begin(); ctxI != crit->const_ctxs().end(); ctxI++) {
ihipCtx_t* ctx = *ctxI;
(*ctxI)->locked_reset();
tprintf(DB_SYNC, " ctx cleanup %s\n", ToString(ctx).c_str());
delete ctx;
}
// Clear the list.
crit->ctxs().clear();
// reset _primaryCtx
_primaryCtx->locked_reset();
tprintf(DB_SYNC, " _primaryCtx cleanup %s\n", ToString(_primaryCtx).c_str());
// Reset and release all memory stored in the tracker:
// Reset will remove peer mapping so don't need to do this explicitly.
// FIXME - This is clearly a non-const action! Is this a context reset or a device reset -
// maybe should reference count?
_state = 0;
am_memtracker_reset(_acc);
    // FIXME - Calling am_memtracker_reset is really bad since it destroys all buffers allocated by
    // the HCC runtime as well, such as the printf buffer. Re-initialize the printf buffer as a
    // workaround for now.
#ifdef HC_FEATURE_PRINTF
Kalmar::getContext()->initPrintfBuffer();
#endif
};
#define ErrorCheck(x) error_check(x, __LINE__, __FILE__)
void error_check(hsa_status_t hsa_error_code, int line_num, std::string str) {
if ((hsa_error_code != HSA_STATUS_SUCCESS) && (hsa_error_code != HSA_STATUS_INFO_BREAK)) {
printf("HSA reported error!\n In file: %s\nAt line: %d\n", str.c_str(), line_num);
}
}
//---
// Helper for initProperties
// Determines if the given agent is of type HSA_DEVICE_TYPE_GPU and counts it.
static hsa_status_t countGpuAgents(hsa_agent_t agent, void* data) {
if (data == NULL) {
return HSA_STATUS_ERROR_INVALID_ARGUMENT;
}
hsa_device_type_t device_type;
hsa_status_t status = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
if (status != HSA_STATUS_SUCCESS) {
return status;
}
if (device_type == HSA_DEVICE_TYPE_GPU) {
(*static_cast<int*>(data))++;
}
return HSA_STATUS_SUCCESS;
}
hsa_status_t FindGpuDevice(hsa_agent_t agent, void* data) {
if (data == NULL) {
return HSA_STATUS_ERROR_INVALID_ARGUMENT;
}
hsa_device_type_t hsa_device_type;
hsa_status_t hsa_error_code =
hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &hsa_device_type);
if (hsa_error_code != HSA_STATUS_SUCCESS) {
return hsa_error_code;
}
if (hsa_device_type == HSA_DEVICE_TYPE_GPU) {
*((hsa_agent_t*)data) = agent;
return HSA_STATUS_INFO_BREAK;
}
return HSA_STATUS_SUCCESS;
}
hsa_status_t GetDevicePool(hsa_amd_memory_pool_t pool, void* data) {
if (NULL == data) {
return HSA_STATUS_ERROR_INVALID_ARGUMENT;
}
hsa_status_t err;
hsa_amd_segment_t segment;
uint32_t flag;
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, &segment);
ErrorCheck(err);
if (HSA_AMD_SEGMENT_GLOBAL != segment) return HSA_STATUS_SUCCESS;
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &flag);
ErrorCheck(err);
*((hsa_amd_memory_pool_t*)data) = pool;
return HSA_STATUS_SUCCESS;
}
int checkAccess(hsa_agent_t agent, hsa_amd_memory_pool_t pool) {
hsa_status_t err;
hsa_amd_memory_pool_access_t access;
err = hsa_amd_agent_memory_pool_get_info(agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS,
&access);
ErrorCheck(err);
return access;
}
hsa_status_t get_pool_info(hsa_amd_memory_pool_t pool, void* data) {
hsa_status_t err;
hipDeviceProp_t* p_prop = reinterpret_cast<hipDeviceProp_t*>(data);
uint32_t region_segment;
// Get pool segment
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, ®ion_segment);
ErrorCheck(err);
switch (region_segment) {
case HSA_REGION_SEGMENT_READONLY:
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE,
&(p_prop->totalConstMem));
break;
case HSA_REGION_SEGMENT_GROUP:
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE,
&(p_prop->sharedMemPerBlock));
break;
default:
break;
}
return err;
}
// Finds the first agent of type HSA_DEVICE_TYPE_CPU and returns it through the data pointer.
static hsa_status_t findCpuAgent(hsa_agent_t agent, void* data) {
hsa_device_type_t device_type;
hsa_status_t status = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
if (status != HSA_STATUS_SUCCESS) {
return status;
}
if (device_type == HSA_DEVICE_TYPE_CPU) {
(*static_cast<hsa_agent_t*>(data)) = agent;
return HSA_STATUS_INFO_BREAK;
}
return HSA_STATUS_SUCCESS;
}
#define DeviceErrorCheck(x) \
if (x != HSA_STATUS_SUCCESS) { \
return hipErrorInvalidDevice; \
}
//---
// Initialize properties for the device.
// Call this once when the ihipDevice_t is created:
hipError_t ihipDevice_t::initProperties(hipDeviceProp_t* prop) {
hipError_t e = hipSuccess;
hsa_status_t err;
memset(prop, 0, sizeof(hipDeviceProp_t));
if (_hsaAgent.handle == -1) {
return hipErrorInvalidDevice;
}
// Iterates over the agents to determine Multiple GPU devices
// using the countGpuAgents callback.
    //! @bug : on HCC, isMultiGpuBoard returns True if the system contains multiple GPUs (rather
    //! than if the GPU is on a multi-ASIC board)
int gpuAgentsCount = 0;
err = hsa_iterate_agents(countGpuAgents, &gpuAgentsCount);
if (err == HSA_STATUS_INFO_BREAK) {
err = HSA_STATUS_SUCCESS;
}
DeviceErrorCheck(err);
    prop->isMultiGpuBoard = (gpuAgentsCount < 2) ? 0 : 1;
// Get agent name
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_PRODUCT_NAME,
&(prop->name));
DeviceErrorCheck(err);
char archName[256];
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_NAME, &archName);
prop->gcnArch = atoi(archName + 3);
DeviceErrorCheck(err);
// Get agent node
uint32_t node;
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_NODE, &node);
DeviceErrorCheck(err);
// Get wavefront size
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WAVEFRONT_SIZE, &prop->warpSize);
DeviceErrorCheck(err);
// Get max total number of work-items in a workgroup
err =
hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WORKGROUP_MAX_SIZE, &prop->maxThreadsPerBlock);
DeviceErrorCheck(err);
// Get max number of work-items of each dimension of a work-group
uint16_t work_group_max_dim[3];
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_WORKGROUP_MAX_DIM, work_group_max_dim);
DeviceErrorCheck(err);
for (int i = 0; i < 3; i++) {
prop->maxThreadsDim[i] = work_group_max_dim[i];
}
hsa_dim3_t grid_max_dim;
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_GRID_MAX_DIM, &grid_max_dim);
DeviceErrorCheck(err);
prop->maxGridSize[0] = (int)((grid_max_dim.x == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.x);
prop->maxGridSize[1] = (int)((grid_max_dim.y == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.y);
prop->maxGridSize[2] = (int)((grid_max_dim.z == UINT32_MAX) ? (INT32_MAX) : grid_max_dim.z);
// Get Max clock frequency
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_MAX_CLOCK_FREQUENCY,
&prop->clockRate);
    prop->clockRate *= 1000.0;  // convert MHz to kHz.
DeviceErrorCheck(err);
uint64_t counterHz;
err = hsa_system_get_info(HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, &counterHz);
DeviceErrorCheck(err);
prop->clockInstructionRate = counterHz / 1000;
// Get Agent BDFID (bus/device/function ID)
uint16_t bdf_id = 1;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_BDFID, &bdf_id);
DeviceErrorCheck(err);
// BDFID is 16bit uint: [8bit - BusID | 5bit - Device ID | 3bit - Function/DomainID]
prop->pciDomainID = bdf_id & 0x7;
prop->pciDeviceID = (bdf_id >> 3) & 0x1F;
prop->pciBusID = (bdf_id >> 8) & 0xFF;
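    // Worked example (illustrative, not from the original source): bdf_id == 0x0C10 decodes to
    // pciBusID = (0x0C10 >> 8) & 0xFF = 0x0C, pciDeviceID = (0x0C10 >> 3) & 0x1F = 0x02,
    // pciDomainID = 0x0C10 & 0x7 = 0x0.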
// Masquerade as a 3.0-level device. This will change as more HW functions are properly
// supported. Application code should use the arch.has* to do detailed feature detection.
prop->major = 3;
prop->minor = 0;
// Get number of Compute Unit
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT,
&(prop->multiProcessorCount));
DeviceErrorCheck(err);
// TODO-hsart - this appears to return 0?
uint32_t cache_size[4];
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_CACHE_SIZE, cache_size);
DeviceErrorCheck(err);
prop->l2CacheSize = cache_size[1];
/* Computemode for HSA Devices is always : cudaComputeModeDefault */
prop->computeMode = 0;
_isLargeBar = _acc.has_cpu_accessible_am();
// Get Max Threads Per Multiprocessor
uint32_t max_waves_per_cu;
err = hsa_agent_get_info(_hsaAgent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_MAX_WAVES_PER_CU,
&max_waves_per_cu);
DeviceErrorCheck(err);
prop->maxThreadsPerMultiProcessor = prop->warpSize * max_waves_per_cu;
// Get memory properties
err = hsa_amd_agent_iterate_memory_pools(_hsaAgent, get_pool_info, prop);
if (err == HSA_STATUS_INFO_BREAK) {
err = HSA_STATUS_SUCCESS;
}
DeviceErrorCheck(err);
// Get the size of the pool we are using for Accelerator Memory allocations:
hsa_region_t* am_region = static_cast<hsa_region_t*>(_acc.get_hsa_am_region());
err = hsa_region_get_info(*am_region, HSA_REGION_INFO_SIZE, &prop->totalGlobalMem);
DeviceErrorCheck(err);
    // maxSharedMemoryPerMultiProcessor should be the same as the group memory size.
// Group memory will not be paged out, so, the physical memory size is the total shared memory
// size, and also equal to the group pool size.
prop->maxSharedMemoryPerMultiProcessor = prop->totalGlobalMem;
// Get Max memory clock frequency
err =
hsa_region_get_info(*am_region, (hsa_region_info_t)HSA_AMD_REGION_INFO_MAX_CLOCK_FREQUENCY,
&prop->memoryClockRate);
DeviceErrorCheck(err);
    prop->memoryClockRate *= 1000.0;  // convert MHz to kHz.
// Get global memory bus width in bits
err = hsa_region_get_info(*am_region, (hsa_region_info_t)HSA_AMD_REGION_INFO_BUS_WIDTH,
&prop->memoryBusWidth);
DeviceErrorCheck(err);
// Set feature flags - these are all mandatory for HIP on HCC path:
// Some features are under-development and future revs may support flags that are currently 0.
// Reporting of these flags should be synchronized with the HIP_ARCH* compile-time defines in
// hip_runtime.h
prop->arch.hasGlobalInt32Atomics = 1;
prop->arch.hasGlobalFloatAtomicExch = 1;
prop->arch.hasSharedInt32Atomics = 1;
prop->arch.hasSharedFloatAtomicExch = 1;
    prop->arch.hasFloatAtomicAdd = 1;  // implemented with a CAS loop, but reported as supported
prop->arch.hasGlobalInt64Atomics = 1;
prop->arch.hasSharedInt64Atomics = 1;
prop->arch.hasDoubles = 1;
prop->arch.hasWarpVote = 1;
prop->arch.hasWarpBallot = 1;
prop->arch.hasWarpShuffle = 1;
prop->arch.hasFunnelShift = 0; // TODO-hcc
prop->arch.hasThreadFenceSystem = 1;
prop->arch.hasSyncThreadsExt = 0; // TODO-hcc
prop->arch.hasSurfaceFuncs = 0; // TODO-hcc
prop->arch.has3dGrid = 1;
prop->arch.hasDynamicParallelism = 0;
prop->concurrentKernels =
1; // All ROCm hardware supports executing multiple kernels concurrently
prop->canMapHostMemory = 1; // All ROCm devices can map host memory
prop->totalConstMem = 16384;
#if 0
// TODO - code broken below since it always returns 1.
// Are the flags part of the context or part of the device?
if ( _device_flags | hipDeviceMapHost) {
prop->canMapHostMemory = 1;
} else {
prop->canMapHostMemory = 0;
}
#endif
// Get profile
hsa_profile_t agent_profile;
err = hsa_agent_get_info(_hsaAgent, HSA_AGENT_INFO_PROFILE, &agent_profile);
DeviceErrorCheck(err);
if(agent_profile == HSA_PROFILE_FULL) {
prop->integrated = 1;
}
return e;
}
//=================================================================================================
// ihipCtx_t
//=================================================================================================
ihipCtx_t::ihipCtx_t(ihipDevice_t* device, unsigned deviceCnt, unsigned flags)
: _ctxFlags(flags), _device(device), _criticalData(this, deviceCnt) {
// locked_reset();
LockedAccessor_CtxCrit_t crit(_criticalData);
_defaultStream = new ihipStream_t(this, getDevice()->_acc.get_default_view(), hipStreamDefault);
crit->addStream(_defaultStream);
// Reset peer list to just me:
crit->resetPeerWatchers(this);
tprintf(DB_SYNC, "created ctx with defaultStream=%p (%s)\n", _defaultStream,
ToString(_defaultStream).c_str());
};
ihipCtx_t::~ihipCtx_t() {
if (_defaultStream) {
delete _defaultStream;
_defaultStream = NULL;
}
}
// Reset the device - this is called from hipDeviceReset.
// Device may be reset multiple times, and may be reset after init.
void ihipCtx_t::locked_reset() {
// Obtain mutex access to the device critical data, release by destructor
LockedAccessor_CtxCrit_t crit(_criticalData);
//---
// Wait for pending activity to complete? TODO - check if this is required behavior:
tprintf(DB_SYNC, "locked_reset waiting for activity to complete.\n");
// Reset and remove streams:
// Delete all created streams including the default one.
for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end();
streamI++) {
ihipStream_t* stream = *streamI;
(*streamI)->locked_wait();
tprintf(DB_SYNC, " delete %s\n", ToString(stream).c_str());
delete stream;
}
// Clear the list.
crit->streams().clear();
// Create a fresh default stream and add it:
_defaultStream = new ihipStream_t(this, getDevice()->_acc.get_default_view(), hipStreamDefault);
crit->addStream(_defaultStream);
#if 0
// Reset peer list to just me:
crit->resetPeerWatchers(this);
// Reset and release all memory stored in the tracker:
// Reset will remove peer mapping so don't need to do this explicitly.
// FIXME - This is clearly a non-const action! Is this a context reset or a device reset - maybe should reference count?
ihipDevice_t *device = getWriteableDevice();
device->_state = 0;
am_memtracker_reset(device->_acc);
#endif
};
//---
std::string ihipCtx_t::toString() const {
std::ostringstream ss;
ss << this;
return ss.str();
};
//----
//=================================================================================================
// Utility functions, these are not part of the public HIP API
//=================================================================================================
//=================================================================================================
// This called for submissions that are sent to the null/default stream. This routine ensures
// that this new command waits for activity in the other streams to complete before proceeding.
//
// HIP_SYNC_NULL_STREAM=0 does all dependency resolution on the GPU
// HIP_SYNC_NULL_STREAM=1 is a legacy, non-optimal mode which conservatively waits on the host.
//
// If waitOnSelf is set, this additionally waits for the default stream to empty.
// In new HIP_SYNC_NULL_STREAM=0 mode, this enqueues a marker which causes the default stream to
// wait for other activity, but doesn't actually block the host. If host blocking is desired, the
// caller should set syncHost.
//
// syncToHost causes host to wait for the stream to finish.
// Note HIP_SYNC_NULL_STREAM=1 path always sync to Host.
void ihipCtx_t::locked_syncDefaultStream(bool waitOnSelf, bool syncHost) {
LockedAccessor_CtxCrit_t crit(_criticalData);
tprintf(DB_SYNC, "syncDefaultStream \n");
// Vector of ops sent to each stream that will complete before ops sent to null stream:
std::vector<hc::completion_future> depOps;
for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end();
streamI++) {
ihipStream_t* stream = *streamI;
// Don't wait for streams that have "opted-out" of syncing with NULL stream.
// And - don't wait for the NULL stream, unless waitOnSelf specified.
bool waitThisStream = (!(stream->_flags & hipStreamNonBlocking)) &&
(waitOnSelf || (stream != _defaultStream));
if (HIP_SYNC_NULL_STREAM) {
if (waitThisStream) {
stream->locked_wait();
}
} else {
if (waitThisStream) {
LockedAccessor_StreamCrit_t streamCrit(stream->_criticalData);
// The last marker will provide appropriate visibility:
if (!streamCrit->_av.get_is_empty()) {
depOps.push_back(streamCrit->_av.create_marker(hc::accelerator_scope));
tprintf(DB_SYNC, " push marker to wait for stream=%s\n",
ToString(stream).c_str());
} else {
tprintf(DB_SYNC, " skipped stream=%s since it is empty\n",
ToString(stream).c_str());
}
}
}
}
// Enqueue a barrier to wait on all the barriers we sent above:
if (!HIP_SYNC_NULL_STREAM && !depOps.empty()) {
LockedAccessor_StreamCrit_t defaultStreamCrit(_defaultStream->_criticalData);
tprintf(DB_SYNC, " null-stream wait on %zu non-empty streams. sync_host=%d\n",
depOps.size(), syncHost);
hc::completion_future defaultCf = defaultStreamCrit->_av.create_blocking_marker(
depOps.begin(), depOps.end(), hc::accelerator_scope);
if (syncHost) {
defaultCf.wait(); // TODO - account for active or blocking here.
}
}
tprintf(DB_SYNC, " syncDefaultStream depOps=%zu\n", depOps.size());
}
//---
void ihipCtx_t::locked_removeStream(ihipStream_t* s) {
LockedAccessor_CtxCrit_t crit(_criticalData);
crit->streams().remove(s);
}
//---
// Heavyweight synchronization that waits on all streams, ignoring hipStreamNonBlocking flag.
void ihipCtx_t::locked_waitAllStreams() {
LockedAccessor_CtxCrit_t crit(_criticalData);
tprintf(DB_SYNC, "waitAllStream\n");
for (auto streamI = crit->const_streams().begin(); streamI != crit->const_streams().end();
streamI++) {
(*streamI)->locked_wait();
}
}
std::string HIP_DB_string(unsigned db) {
std::string dbStr;
bool first = true;
for (int i = 0; i < DB_MAX_FLAG; i++) {
if (db & (1 << i)) {
if (!first) {
dbStr += "+";
};
dbStr += dbName[i]._color;
dbStr += dbName[i]._shortName;
dbStr += KNRM;
first = false;
};
}
return dbStr;
}
// Callback used to process HIP_DB input, supports either
// integer or flag names separated by +
std::string HIP_DB_callback(void* var_ptr, const char* envVarString) {
int* var_ptr_int = static_cast<int*>(var_ptr);
std::string e(envVarString);
trim(&e);
if (!e.empty() && isdigit(e.c_str()[0])) {
long int v = strtol(envVarString, NULL, 0);
*var_ptr_int = (int)(v);
} else {
*var_ptr_int = 0;
std::vector<std::string> tokens;
tokenize(e, '+', &tokens);
for (auto t = tokens.begin(); t != tokens.end(); t++) {
for (int i = 0; i < DB_MAX_FLAG; i++) {
if (!strcmp(t->c_str(), dbName[i]._shortName)) {
*var_ptr_int |= (1 << i);
} // TODO - else throw error?
}
}
}
    return HIP_DB_string(*var_ptr_int);
}
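// Illustrative sketch (not part of the original source): HIP_DB accepts either form, e.g.
//   HIP_DB=0x3       -> parsed with strtol, enables the two lowest debug bits
//   HIP_DB=api+mem   -> each '+'-separated token is matched against dbName[i]._shortName and
//                       OR'd into the mask (unknown tokens are currently ignored)
// The returned string is the human-readable form produced by HIP_DB_string().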
// Callback used to process list of visible devices.
std::string HIP_VISIBLE_DEVICES_callback(void* var_ptr, const char* envVarString) {
// Parse the string stream of env and store the device ids to g_hip_visible_devices global
// variable
std::string str = envVarString;
std::istringstream ss(str);
std::string device_id;
    // Clean up the default value
g_hip_visible_devices.clear();
g_visible_device = true;
// Read the visible device numbers
while (std::getline(ss, device_id, ',')) {
if (atoi(device_id.c_str()) >= 0) {
g_hip_visible_devices.push_back(atoi(device_id.c_str()));
        } else {  // Any device number after an invalid number will not be present
break;
}
}
std::string valueString;
    // Print out the visible device ids
for (int i = 0; i < g_hip_visible_devices.size(); i++) {
valueString += std::to_string((g_hip_visible_devices[i]));
valueString += ' ';
}
return valueString;
}
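// Illustrative sketch (not part of the original source):
//   HIP_VISIBLE_DEVICES=0,2    -> g_hip_visible_devices = {0, 2}
//   HIP_VISIBLE_DEVICES=1,-1,2 -> g_hip_visible_devices = {1}; parsing stops at the first
//                                 negative/invalid entry, so the trailing "2" is dropped.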
// TODO - change last arg to pointer.
void parseTrigger(std::string triggerString, std::vector<ProfTrigger>& profTriggers) {
std::vector<std::string> tidApiTokens;
tokenize(std::string(triggerString), ',', &tidApiTokens);
for (auto t = tidApiTokens.begin(); t != tidApiTokens.end(); t++) {
std::vector<std::string> oneToken;
// std::cout << "token=" << *t << "\n";
tokenize(std::string(*t), '.', &oneToken);
int tid = 1;
uint64_t apiTrigger = 0;
if (oneToken.size() == 1) {
// the case with just apiNum
apiTrigger = std::strtoull(oneToken[0].c_str(), nullptr, 0);
} else if (oneToken.size() == 2) {
// the case with tid.apiNum
tid = std::strtoul(oneToken[0].c_str(), nullptr, 0);
apiTrigger = std::strtoull(oneToken[1].c_str(), nullptr, 0);
} else {
throw ihipException(hipErrorRuntimeOther); // TODO -> bad env var?
}
if (tid > 10000) {
throw ihipException(hipErrorRuntimeOther); // TODO -> bad env var?
} else {
profTriggers.resize(tid + 1);
// std::cout << "tid:" << tid << " add: " << apiTrigger << "\n";
profTriggers[tid].add(apiTrigger);
}
}
for (int tid = 1; tid < profTriggers.size(); tid++) {
profTriggers[tid].sort();
profTriggers[tid].print(tid);
}
}
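// Illustrative sketch (not part of the original source): with HIP_DB_START_API="2.1000,1500",
// the token "2.1000" arms a trigger for short-tid 2 at api_seq_num 1000, and the single-number
// token "1500" defaults to tid 1, so tid 1 resumes profiling at api_seq_num 1500.
// Tids above 10000 are rejected with hipErrorRuntimeOther.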
void HipReadEnv() {
/*
* Environment variables
*/
g_hip_visible_devices.push_back(0); /* Set the default value of visible devices */
READ_ENV_I(release, HIP_PRINT_ENV, 0, "Print HIP environment variables.");
//-- READ HIP_PRINT_ENV env first, since it has impact on later env var reading
    // TODO: In HIP/hcc, this variable blocks after both kernel commands and data transfers. Maybe
    // it should be a bit-mask for each command type?
    READ_ENV_I(release, HIP_LAUNCH_BLOCKING, CUDA_LAUNCH_BLOCKING,
               "Make HIP kernel launches 'host-synchronous', so they block until the kernel "
               "completes. Alias: CUDA_LAUNCH_BLOCKING.");
READ_ENV_S(release, HIP_LAUNCH_BLOCKING_KERNELS, 0,
"Comma-separated list of kernel names to make host-synchronous, so they block until "
"completed.");
if (!HIP_LAUNCH_BLOCKING_KERNELS.empty()) {
tokenize(HIP_LAUNCH_BLOCKING_KERNELS, ',', &g_hipLaunchBlockingKernels);
}
READ_ENV_I(release, HIP_API_BLOCKING, 0,
"Make HIP APIs 'host-synchronous', so they block until completed. Impacts "
"hipMemcpyAsync, hipMemsetAsync.");
READ_ENV_I(release, HIP_HIDDEN_FREE_MEM, 0,
"Amount of memory to hide from the free memory reported by hipMemGetInfo, specified "
"in MB. Impacts hipMemGetInfo.");
READ_ENV_C(release, HIP_DB, 0,
"Print debug info. Bitmask (HIP_DB=0xff) or flags separated by '+' "
"(HIP_DB=api+sync+mem+copy+fatbin)",
HIP_DB_callback);
if ((HIP_DB & (1 << DB_API)) && (HIP_TRACE_API == 0)) {
// Set HIP_TRACE_API default before we read it, so it is printed correctly.
HIP_TRACE_API = 1;
}
READ_ENV_I(release, HIP_TRACE_API, 0,
"Trace each HIP API call. Print function name and return code to stderr as program "
"executes.");
READ_ENV_S(release, HIP_TRACE_API_COLOR, 0,
"Color to use for HIP_API. None/Red/Green/Yellow/Blue/Magenta/Cyan/White");
READ_ENV_I(release, HIP_PROFILE_API, 0,
"Add HIP API markers to ATP file generated with CodeXL. 0x1=short API name, "
"0x2=full API name including args.");
READ_ENV_S(release, HIP_DB_START_API, 0,
"Comma-separated list of tid.api_seq_num for when to start debug and profiling.");
READ_ENV_S(release, HIP_DB_STOP_API, 0,
"Comma-separated list of tid.api_seq_num for when to stop debug and profiling.");
READ_ENV_C(release, HIP_VISIBLE_DEVICES, CUDA_VISIBLE_DEVICES,
"Only devices whose index is present in the sequence are visible to HIP "
"applications and they are enumerated in the order of sequence.",
HIP_VISIBLE_DEVICES_callback);
READ_ENV_I(release, HIP_WAIT_MODE, 0,
"Force synchronization mode. 1= force yield, 2=force spin, 0=defaults specified in "
"application");
READ_ENV_I(release, HIP_FORCE_P2P_HOST, 0,
"Force use of host/staging copy for peer-to-peer copies.1=always use copies, "
"2=always return false for hipDeviceCanAccessPeer");
READ_ENV_I(release, HIP_FORCE_SYNC_COPY, 0,
"Force all copies (even hipMemcpyAsync) to use sync copies");
READ_ENV_I(release, HIP_FAIL_SOC, 0,
"Fault on Sub-Optimal-Copy, rather than use a slower but functional implementation. "
" Bit 0x1=Fail on async copy with unpinned memory. Bit 0x2=Fail peer copy rather "
"than use staging buffer copy");
READ_ENV_I(release, HIP_SYNC_HOST_ALLOC, 0,
"Sync before and after all host memory allocations. May help stability");
READ_ENV_I(release, HIP_INIT_ALLOC, 0,
"If not -1, initialize allocated memory to specified byte");
READ_ENV_I(release, HIP_SYNC_NULL_STREAM, 0, "Synchronize on host for null stream submissions");
READ_ENV_I(release, HIP_FORCE_NULL_STREAM, 0,
"Force all stream allocations to secretly return the null stream");
READ_ENV_I(release, HIP_SYNC_STREAM_WAIT, 0, "hipStreamWaitEvent will synchronize to host");
READ_ENV_I(release, HIP_HOST_COHERENT, 0,
"If set, all host memory will be allocated as fine-grained system memory. This "
"allows threadfence_system to work but prevents host memory from being cached on "
"GPU which may have performance impact.");
    READ_ENV_I(release, HCC_OPT_FLUSH, 0,
               "When set, use agent-scope fence operations rather than system-scope fence "
               "operations when possible. This flag controls both HIP and HCC behavior.");
READ_ENV_I(release, HIP_EVENT_SYS_RELEASE, 0,
"If set, event are created with hipEventReleaseToSystem by default. If 0, events "
"are created with hipEventReleaseToDevice by default. The defaults can be "
"overridden by specifying hipEventReleaseToSystem or hipEventReleaseToDevice flag "
"when creating the event.");
READ_ENV_I(release, HIP_DUMP_CODE_OBJECT, 0,
"If set, dump code object as __hip_dump_code_object[nnnn].o in the current directory,"
"where nnnn is the index number.");
// Some flags have both compile-time and runtime flags - generate a warning if user enables the
// runtime flag but the compile-time flag is disabled.
if (HIP_DB && !COMPILE_HIP_DB) {
fprintf(stderr,
"warning: env var HIP_DB=0x%x but COMPILE_HIP_DB=0. (perhaps enable "
"COMPILE_HIP_DB in src code before compiling?)\n",
HIP_DB);
}
if (HIP_TRACE_API && !COMPILE_HIP_TRACE_API) {
fprintf(stderr,
"warning: env var HIP_TRACE_API=0x%x but COMPILE_HIP_TRACE_API=0. (perhaps enable "
"COMPILE_HIP_TRACE_API in src code before compiling?)\n",
HIP_DB);
}
if (HIP_TRACE_API) {
HIP_DB |= 0x1;
}
if (HIP_PROFILE_API && !COMPILE_HIP_ATP_MARKER) {
fprintf(stderr,
"warning: env var HIP_PROFILE_API=0x%x but COMPILE_HIP_ATP_MARKER=0. (perhaps "
"enable COMPILE_HIP_ATP_MARKER in src code before compiling?)\n",
HIP_PROFILE_API);
HIP_PROFILE_API = 0;
}
if (HIP_DB) {
fprintf(stderr, "HIP_DB=0x%x [%s]\n", HIP_DB, HIP_DB_string(HIP_DB).c_str());
}
std::transform(HIP_TRACE_API_COLOR.begin(), HIP_TRACE_API_COLOR.end(),
HIP_TRACE_API_COLOR.begin(), ::tolower);
if (HIP_TRACE_API_COLOR == "none") {
API_COLOR = "";
API_COLOR_END = "";
} else if (HIP_TRACE_API_COLOR == "red") {
API_COLOR = KRED;
} else if (HIP_TRACE_API_COLOR == "green") {
API_COLOR = KGRN;
} else if (HIP_TRACE_API_COLOR == "yellow") {
API_COLOR = KYEL;
} else if (HIP_TRACE_API_COLOR == "blue") {
API_COLOR = KBLU;
} else if (HIP_TRACE_API_COLOR == "magenta") {
API_COLOR = KMAG;
} else if (HIP_TRACE_API_COLOR == "cyan") {
API_COLOR = KCYN;
} else if (HIP_TRACE_API_COLOR == "white") {
API_COLOR = KWHT;
} else {
fprintf(stderr,
"warning: env var HIP_TRACE_API_COLOR=%s must be "
"None/Red/Green/Yellow/Blue/Magenta/Cyan/White",
HIP_TRACE_API_COLOR.c_str());
};
parseTrigger(HIP_DB_START_API, g_dbStartTriggers);
parseTrigger(HIP_DB_STOP_API, g_dbStopTriggers);
};
//---
// Function called one-time at initialization time to construct a table of all GPU devices.
// HIP/CUDA uses integer "deviceIds" - these are indexes into this table.
// AMP maintains a table of accelerators, but some are emulated - ie for debug or CPU.
// This function creates a vector with only the GPU accelerators.
// It is called with C++11 call_once, which provides thread-safety.
void ihipInit() {
#if COMPILE_HIP_ATP_MARKER
amdtInitializeActivityLogger();
amdtScopedMarker("ihipInit", "HIP", NULL);
#endif
HipReadEnv();
/*
* Build a table of valid compute devices.
*/
auto accs = hc::accelerator::get_all();
int deviceCnt = 0;
for (int i = 0; i < accs.size(); i++) {
if (!accs[i].get_is_emulated()) {
deviceCnt++;
}
};
// Make sure the hip visible devices are within the deviceCnt range
for (int i = 0; i < g_hip_visible_devices.size(); i++) {
if (g_hip_visible_devices[i] >= deviceCnt) {
// Make sure any DeviceID after invalid DeviceID will be erased.
g_hip_visible_devices.resize(i);
break;
}
}
hsa_status_t err = hsa_iterate_agents(findCpuAgent, &g_cpu_agent);
if (err != HSA_STATUS_INFO_BREAK) {
// didn't find a CPU.
throw ihipException(hipErrorRuntimeOther);
}
g_deviceArray = new ihipDevice_t*[deviceCnt];
g_deviceCnt = 0;
for (int i = 0; i < accs.size(); i++) {
// check if the device id is included in the HIP_VISIBLE_DEVICES env variable
if (!accs[i].get_is_emulated()) {
if (std::find(g_hip_visible_devices.begin(), g_hip_visible_devices.end(), (i - 1)) ==
g_hip_visible_devices.end() &&
g_visible_device) {
// If device is not in visible devices list, ignore
continue;
}
g_deviceArray[g_deviceCnt] = new ihipDevice_t(g_deviceCnt, deviceCnt, accs[i]);
g_deviceCnt++;
}
}
g_allAgents = static_cast<hsa_agent_t*>(malloc((g_deviceCnt + 1) * sizeof(hsa_agent_t)));
g_allAgents[0] = g_cpu_agent;
for (int i = 0; i < g_deviceCnt; i++) {
g_allAgents[i + 1] = g_deviceArray[i]->_hsaAgent;
}
g_numLogicalThreads = std::thread::hardware_concurrency();
// If HIP_VISIBLE_DEVICES is not set, make sure all devices are initialized
if (!g_visible_device) {
assert(deviceCnt == g_deviceCnt);
}
tprintf(DB_SYNC, "pid=%u %-30s g_numLogicalThreads=%u\n", getpid(), "<ihipInit>",
g_numLogicalThreads);
}
namespace hip_impl {
hipError_t hip_init() {
static std::once_flag hip_initialized;
std::call_once(hip_initialized, ihipInit);
ihipCtxStackUpdate();
return hipSuccess;
}
}
hipError_t ihipStreamSynchronize(hipStream_t stream) {
hipError_t e = hipSuccess;
if (stream == hipStreamNull) {
ihipCtx_t* ctx = ihipGetTlsDefaultCtx();
ctx->locked_syncDefaultStream(true /*waitOnSelf*/, true /*syncToHost*/);
} else {
        // note this does not synchronize with the NULL stream:
stream->locked_wait();
e = hipSuccess;
}
return e;
}
void ihipStreamCallbackHandler(ihipStreamCallback_t* cb) {
hipError_t e = hipSuccess;
// Synchronize stream
tprintf(DB_SYNC, "ihipStreamCallbackHandler wait on stream %s\n",
ToString(cb->_stream).c_str());
e = ihipStreamSynchronize(cb->_stream);
// Call registered callback function
cb->_callback(cb->_stream, e, cb->_userData);
delete cb;
}
//---
// Get the stream to use for a command submission.
//
// If stream==NULL synchronize appropriately with other streams and return the default av for the
// device. If stream is valid, return the AV to use.
hipStream_t ihipSyncAndResolveStream(hipStream_t stream) {
if (stream == hipStreamNull) {
// Submitting to NULL stream, call locked_syncDefaultStream to wait for all other streams:
ihipCtx_t* ctx = ihipGetTlsDefaultCtx();
tprintf(DB_SYNC, "ihipSyncAndResolveStream %s wait on default stream\n",
ToString(stream).c_str());
#ifndef HIP_API_PER_THREAD_DEFAULT_STREAM
ctx->locked_syncDefaultStream(false, false);
#endif
return ctx->_defaultStream;
} else {
// Submitting to a "normal" stream, just wait for null stream:
if (!(stream->_flags & hipStreamNonBlocking)) {
if (HIP_SYNC_NULL_STREAM) {
tprintf(DB_SYNC, "ihipSyncAndResolveStream %s host-wait on default stream\n",
ToString(stream).c_str());
stream->getCtx()->_defaultStream->locked_wait();
} else {
ihipStream_t* defaultStream = stream->getCtx()->_defaultStream;
bool needGatherMarker = false; // used to gather together other markers.
hc::completion_future dcf;
{
LockedAccessor_StreamCrit_t defaultStreamCrit(defaultStream->criticalData());
// TODO - could call create_blocking_marker(queue) or uses existing marker.
if (!defaultStreamCrit->_av.get_is_empty()) {
needGatherMarker = true;
tprintf(DB_SYNC, " %s adding marker to default %s for dependency\n",
ToString(stream).c_str(), ToString(defaultStream).c_str());
dcf = defaultStreamCrit->_av.create_marker(hc::accelerator_scope);
} else {
tprintf(DB_SYNC, " %s skipping marker since default stream is empty\n",
ToString(stream).c_str());
}
}
if (needGatherMarker) {
// ensure any commands sent to this stream wait on the NULL stream before
// continuing
LockedAccessor_StreamCrit_t thisStreamCrit(stream->criticalData());
// TODO - could be "noret" version of create_blocking_marker
thisStreamCrit->_av.create_blocking_marker(dcf, hc::accelerator_scope);
tprintf(
DB_SYNC,
" %s adding marker to wait for freshly recorded default-stream marker \n",
ToString(stream).c_str());
}
}
}
return stream;
}
}
void ihipPrintKernelLaunch(const char* kernelName, const grid_launch_parm* lp,
const hipStream_t stream) {
if ((HIP_TRACE_API & (1 << TRACE_KCMD)) || HIP_PROFILE_API ||
(COMPILE_HIP_DB & HIP_TRACE_API)) {
std::stringstream os;
os << tls_tidInfo.pid() << " " << tls_tidInfo.tid() << "." << tls_tidInfo.apiSeqNum() << " hipLaunchKernel '"
<< kernelName << "'"
<< " gridDim:" << lp->grid_dim << " groupDim:" << lp->group_dim << " sharedMem:+"
<< lp->dynamic_group_mem_bytes << " " << *stream;
if (COMPILE_HIP_DB && HIP_TRACE_API) {
std::string fullStr;
recordApiTrace(&fullStr, os.str());
}
if (HIP_PROFILE_API == 0x1) {
std::string shortAtpString("hipLaunchKernel:");
shortAtpString += kernelName;
MARKER_BEGIN(shortAtpString.c_str(), "HIP");
} else if (HIP_PROFILE_API == 0x2) {
MARKER_BEGIN(os.str().c_str(), "HIP");
}
}
}
// Called just before a kernel is launched from hipLaunchKernel.
// Allows runtime to track some information about the stream.
hipStream_t ihipPreLaunchKernel(hipStream_t stream, dim3 grid, dim3 block, grid_launch_parm* lp,
const char* kernelNameStr) {
stream = ihipSyncAndResolveStream(stream);
lp->grid_dim.x = grid.x;
lp->grid_dim.y = grid.y;
lp->grid_dim.z = grid.z;
lp->group_dim.x = block.x;
lp->group_dim.y = block.y;
lp->group_dim.z = block.z;
lp->barrier_bit = barrier_bit_queue_default;
lp->launch_fence = -1;
auto crit = stream->lockopen_preKernelCommand();
lp->av = &(crit->_av);
lp->cf = nullptr;
ihipPrintKernelLaunch(kernelNameStr, lp, stream);
return (stream);
}
hipStream_t ihipPreLaunchKernel(hipStream_t stream, size_t grid, dim3 block, grid_launch_parm* lp,
const char* kernelNameStr) {
stream = ihipSyncAndResolveStream(stream);
lp->grid_dim.x = grid;
lp->grid_dim.y = 1;
lp->grid_dim.z = 1;
lp->group_dim.x = block.x;
lp->group_dim.y = block.y;
lp->group_dim.z = block.z;
lp->barrier_bit = barrier_bit_queue_default;
lp->launch_fence = -1;
auto crit = stream->lockopen_preKernelCommand();
lp->av = &(crit->_av);
lp->cf = nullptr;
ihipPrintKernelLaunch(kernelNameStr, lp, stream);
return (stream);
}
hipStream_t ihipPreLaunchKernel(hipStream_t stream, dim3 grid, size_t block, grid_launch_parm* lp,
const char* kernelNameStr) {
stream = ihipSyncAndResolveStream(stream);
lp->grid_dim.x = grid.x;
lp->grid_dim.y = grid.y;
lp->grid_dim.z = grid.z;
lp->group_dim.x = block;
lp->group_dim.y = 1;
lp->group_dim.z = 1;
lp->barrier_bit = barrier_bit_queue_default;
lp->launch_fence = -1;
auto crit = stream->lockopen_preKernelCommand();
lp->av = &(crit->_av);
lp->cf = nullptr;
ihipPrintKernelLaunch(kernelNameStr, lp, stream);
return (stream);
}
hipStream_t ihipPreLaunchKernel(hipStream_t stream, size_t grid, size_t block, grid_launch_parm* lp,
const char* kernelNameStr) {
stream = ihipSyncAndResolveStream(stream);
lp->grid_dim.x = grid;
lp->grid_dim.y = 1;
lp->grid_dim.z = 1;
lp->group_dim.x = block;
lp->group_dim.y = 1;
lp->group_dim.z = 1;
lp->barrier_bit = barrier_bit_queue_default;
lp->launch_fence = -1;
auto crit = stream->lockopen_preKernelCommand();
lp->av = &(crit->_av);
lp->cf = nullptr;
ihipPrintKernelLaunch(kernelNameStr, lp, stream);
return (stream);
}
//---
// Called after kernel finishes execution.
// This releases the lock on the stream.
void ihipPostLaunchKernel(const char* kernelName, hipStream_t stream, grid_launch_parm& lp) {
tprintf(DB_SYNC, "ihipPostLaunchKernel, unlocking stream\n");
stream->lockclose_postKernelCommand(kernelName, lp.av);
if (HIP_PROFILE_API) {
MARKER_END();
}
}
//=================================================================================================
// HIP API Implementation
//
// Implementor notes:
// - All functions should call HIP_INIT_API as the first action:
//      HIP_INIT_API(<function_arguments>);
//
// - All functions should use ihipLogStatus to return the error code (not return the error
//   directly); an illustrative sketch of this pattern follows below.
//=================================================================================================
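//
// Illustrative sketch only (hipFoo and someArg are made-up names, not part of the HIP API);
// the canonical shape of an API entry point following the notes above is:
//
//     hipError_t hipFoo(int someArg) {
//         HIP_INIT_API(hipFoo, someArg);   // first action: init runtime, start API trace
//         hipError_t e = hipSuccess;
//         // ... body of the call, setting e on failure ...
//         return ihipLogStatus(e);         // log the status; never return e directly
//     }
//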
//
//---
//-------------------------------------------------------------------------------------------------
const char* ihipErrorString(hipError_t hip_error) {
switch (hip_error) {
case hipSuccess:
return "hipSuccess";
case hipErrorOutOfMemory:
return "hipErrorOutOfMemory";
case hipErrorNotInitialized:
return "hipErrorNotInitialized";
case hipErrorDeinitialized:
return "hipErrorDeinitialized";
case hipErrorProfilerDisabled:
return "hipErrorProfilerDisabled";
case hipErrorProfilerNotInitialized:
return "hipErrorProfilerNotInitialized";
case hipErrorProfilerAlreadyStarted:
return "hipErrorProfilerAlreadyStarted";
case hipErrorProfilerAlreadyStopped:
return "hipErrorProfilerAlreadyStopped";
case hipErrorInvalidImage:
return "hipErrorInvalidImage";
case hipErrorInvalidContext:
return "hipErrorInvalidContext";
case hipErrorContextAlreadyCurrent:
return "hipErrorContextAlreadyCurrent";
case hipErrorMapFailed:
return "hipErrorMapFailed";
case hipErrorUnmapFailed:
return "hipErrorUnmapFailed";
case hipErrorArrayIsMapped:
return "hipErrorArrayIsMapped";
case hipErrorAlreadyMapped:
return "hipErrorAlreadyMapped";
case hipErrorNoBinaryForGpu:
return "hipErrorNoBinaryForGpu";
case hipErrorAlreadyAcquired:
return "hipErrorAlreadyAcquired";
case hipErrorNotMapped:
return "hipErrorNotMapped";
case hipErrorNotMappedAsArray:
return "hipErrorNotMappedAsArray";
case hipErrorNotMappedAsPointer:
return "hipErrorNotMappedAsPointer";
case hipErrorECCNotCorrectable:
return "hipErrorECCNotCorrectable";
case hipErrorUnsupportedLimit:
return "hipErrorUnsupportedLimit";
case hipErrorContextAlreadyInUse:
return "hipErrorContextAlreadyInUse";
case hipErrorPeerAccessUnsupported:
return "hipErrorPeerAccessUnsupported";
case hipErrorInvalidKernelFile:
return "hipErrorInvalidKernelFile";
case hipErrorInvalidGraphicsContext:
return "hipErrorInvalidGraphicsContext";
case hipErrorInvalidSource:
return "hipErrorInvalidSource";
case hipErrorFileNotFound:
return "hipErrorFileNotFound";
case hipErrorSharedObjectSymbolNotFound:
return "hipErrorSharedObjectSymbolNotFound";
case hipErrorSharedObjectInitFailed:
return "hipErrorSharedObjectInitFailed";
case hipErrorOperatingSystem:
return "hipErrorOperatingSystem";
case hipErrorSetOnActiveProcess:
return "hipErrorSetOnActiveProcess";
case hipErrorInvalidHandle:
return "hipErrorInvalidHandle";
case hipErrorNotFound:
return "hipErrorNotFound";
case hipErrorIllegalAddress:
return "hipErrorIllegalAddress";
case hipErrorMissingConfiguration:
return "hipErrorMissingConfiguration";
case hipErrorMemoryAllocation:
return "hipErrorMemoryAllocation";
case hipErrorInitializationError:
return "hipErrorInitializationError";
case hipErrorLaunchFailure:
return "hipErrorLaunchFailure";
case hipErrorPriorLaunchFailure:
return "hipErrorPriorLaunchFailure";
case hipErrorLaunchTimeOut:
return "hipErrorLaunchTimeOut";
case hipErrorLaunchOutOfResources:
return "hipErrorLaunchOutOfResources";
case hipErrorInvalidDeviceFunction:
return "hipErrorInvalidDeviceFunction";
case hipErrorInvalidConfiguration:
return "hipErrorInvalidConfiguration";
case hipErrorInvalidDevice:
return "hipErrorInvalidDevice";
case hipErrorInvalidValue:
return "hipErrorInvalidValue";
case hipErrorInvalidDevicePointer:
return "hipErrorInvalidDevicePointer";
case hipErrorInvalidMemcpyDirection:
return "hipErrorInvalidMemcpyDirection";
case hipErrorUnknown:
return "hipErrorUnknown";
case hipErrorInvalidResourceHandle:
return "hipErrorInvalidResourceHandle";
case hipErrorNotReady:
return "hipErrorNotReady";
case hipErrorNoDevice:
return "hipErrorNoDevice";
case hipErrorPeerAccessAlreadyEnabled:
return "hipErrorPeerAccessAlreadyEnabled";
case hipErrorPeerAccessNotEnabled:
return "hipErrorPeerAccessNotEnabled";
case hipErrorRuntimeMemory:
return "hipErrorRuntimeMemory";
case hipErrorRuntimeOther:
return "hipErrorRuntimeOther";
case hipErrorHostMemoryAlreadyRegistered:
return "hipErrorHostMemoryAlreadyRegistered";
case hipErrorHostMemoryNotRegistered:
return "hipErrorHostMemoryNotRegistered";
case hipErrorTbd:
return "hipErrorTbd";
default:
return "hipErrorUnknown";
};
};
// Returns true if copyEngineCtx can see the memory allocated on dstCtx and srcCtx.
// The peer-list for a context controls which contexts have access to the memory allocated on that
// context. So we check dstCtx's and srcCtx's peerList to see if they both include thisCtx.
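// For example (hypothetical device numbering): if the copy engine runs on device 0's context
// while the copy moves data between buffers owned by devices 1 and 2, then both device 1's and
// device 2's peer lists must include device 0's context for this check to pass.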
bool ihipStream_t::canSeeMemory(const ihipCtx_t* copyEngineCtx, const hc::AmPointerInfo* dstPtrInfo,
const hc::AmPointerInfo* srcPtrInfo) {
if (copyEngineCtx == nullptr) {
return false;
}
// Make sure this is a device-to-device copy with all memory available to the requested copy
// engine
//
// TODO - pointer-info stores a deviceID not a context, may have some unusual side-effects here:
if (dstPtrInfo->_sizeBytes == 0) {
return false;
} else if (dstPtrInfo->_appId != -1) {
#if USE_APP_PTR_FOR_CTX
ihipCtx_t* dstCtx = static_cast<ihipCtx_t*>(dstPtrInfo->_appPtr);
#else
ihipCtx_t* dstCtx = ihipGetPrimaryCtx(dstPtrInfo->_appId);
#endif
if (copyEngineCtx != dstCtx) {
// Only checks peer list if contexts are different
LockedAccessor_CtxCrit_t ctxCrit(dstCtx->criticalData());
#if DB_PEER_CTX
std::cerr << "checking peer : copyEngineCtx =" << copyEngineCtx << " dstCtx =" << dstCtx
<< " peerCnt=" << ctxCrit->peerCnt() << "\n";
#endif
if (!ctxCrit->isPeerWatcher(copyEngineCtx)) {
return false;
};
}
}
// TODO - pointer-info stores a deviceID not a context, may have some unusual side-effects here:
if (srcPtrInfo->_sizeBytes == 0) {
return false;
} else if (srcPtrInfo->_appId != -1) {
#if USE_APP_PTR_FOR_CTX
ihipCtx_t* srcCtx = static_cast<ihipCtx_t*>(srcPtrInfo->_appPtr);
#else
ihipCtx_t* srcCtx = ihipGetPrimaryCtx(srcPtrInfo->_appId);
#endif
if (copyEngineCtx != srcCtx) {
// Only checks peer list if contexts are different
LockedAccessor_CtxCrit_t ctxCrit(srcCtx->criticalData());
#if DB_PEER_CTX
std::cerr << "checking peer : copyEngineCtx =" << copyEngineCtx << " srcCtx =" << srcCtx
<< " peerCnt=" << ctxCrit->peerCnt() << "\n";
#endif
if (!ctxCrit->isPeerWatcher(copyEngineCtx)) {
return false;
};
}
}
return true;
};
#define CASE_STRING(X) \
case X: \
return #X; \
break;
const char* hipMemcpyStr(unsigned memKind) {
switch (memKind) {
CASE_STRING(hipMemcpyHostToHost);
CASE_STRING(hipMemcpyHostToDevice);
CASE_STRING(hipMemcpyDeviceToHost);
CASE_STRING(hipMemcpyDeviceToDevice);
CASE_STRING(hipMemcpyDefault);
default:
return ("unknown memcpyKind");
};
}
const char* hcMemcpyStr(hc::hcCommandKind memKind) {
using namespace hc;
switch (memKind) {
CASE_STRING(hcMemcpyHostToHost);
CASE_STRING(hcMemcpyHostToDevice);
CASE_STRING(hcMemcpyDeviceToHost);
CASE_STRING(hcMemcpyDeviceToDevice);
// CASE_STRING(hcMemcpyDefault);
default:
return ("unknown memcpyKind");
};
}
// Resolve hipMemcpyDefault to a known type.
unsigned ihipStream_t::resolveMemcpyDirection(bool srcInDeviceMem, bool dstInDeviceMem) {
hipMemcpyKind kind = hipMemcpyDefault;
if (srcInDeviceMem && dstInDeviceMem) {
kind = hipMemcpyDeviceToDevice;
}
if (srcInDeviceMem && !dstInDeviceMem) {
kind = hipMemcpyDeviceToHost;
}
if (!srcInDeviceMem && !dstInDeviceMem) {
kind = hipMemcpyHostToHost;
}
if (!srcInDeviceMem && dstInDeviceMem) {
kind = hipMemcpyHostToDevice;
}
assert(kind != hipMemcpyDefault);
return kind;
}
// hipMemKind must be "resolved" to a specific direction - cannot be default.
void ihipStream_t::resolveHcMemcpyDirection(unsigned hipMemKind,
const hc::AmPointerInfo* dstPtrInfo,
const hc::AmPointerInfo* srcPtrInfo,
hc::hcCommandKind* hcCopyDir, ihipCtx_t** copyDevice,
bool* forceUnpinnedCopy) {
// Ignore what the user tells us and always resolve the direction:
// Some apps apparently rely on this.
hipMemKind = resolveMemcpyDirection(srcPtrInfo->_isInDeviceMem, dstPtrInfo->_isInDeviceMem);
switch (hipMemKind) {
case hipMemcpyHostToHost:
*hcCopyDir = hc::hcMemcpyHostToHost;
break;
case hipMemcpyHostToDevice:
*hcCopyDir = hc::hcMemcpyHostToDevice;
break;
case hipMemcpyDeviceToHost:
*hcCopyDir = hc::hcMemcpyDeviceToHost;
break;
case hipMemcpyDeviceToDevice:
*hcCopyDir = hc::hcMemcpyDeviceToDevice;
break;
default:
throw ihipException(hipErrorRuntimeOther);
};
if (srcPtrInfo->_isInDeviceMem) {
*copyDevice = ihipGetPrimaryCtx(srcPtrInfo->_appId);
} else if (dstPtrInfo->_isInDeviceMem) {
*copyDevice = ihipGetPrimaryCtx(dstPtrInfo->_appId);
} else {
*copyDevice = nullptr;
}
*forceUnpinnedCopy = false;
if (canSeeMemory(*copyDevice, dstPtrInfo, srcPtrInfo)) {
if (HIP_FORCE_P2P_HOST & 0x1) {
*forceUnpinnedCopy = true;
tprintf(DB_COPY,
"Copy engine (dev:%d agent=0x%lx) can see src and dst but "
"HIP_FORCE_P2P_HOST=0, forcing copy through staging buffers.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
} else {
tprintf(DB_COPY, "Copy engine (dev:%d agent=0x%lx) can see src and dst.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
}
} else {
*forceUnpinnedCopy = true;
tprintf(DB_COPY,
"Copy engine(dev:%d agent=0x%lx) cannot see both host and device pointers - "
"forcing copy with unpinned engine.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
if (HIP_FAIL_SOC & 0x2) {
fprintf(stderr,
"HIP_FAIL_SOC: P2P: copy engine(dev:%d agent=0x%lx) cannot see both host and "
"device pointers - forcing copy with unpinned engine.\n",
*copyDevice ? (*copyDevice)->getDeviceNum() : -1,
*copyDevice ? (*copyDevice)->getDevice()->_hsaAgent.handle : 0x0);
throw ihipException(hipErrorRuntimeOther);
}
}
}
void printPointerInfo(unsigned dbFlag, const char* tag, const void* ptr,
const hc::AmPointerInfo& ptrInfo) {
tprintf(dbFlag,
" %s=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d "
"registered=%d allocSeqNum=%zu, appAllocationFlags=%x, appPtr=%p\n",
tag, ptr, ptrInfo._hostPointer, ptrInfo._devicePointer, ptrInfo._sizeBytes,
ptrInfo._appId, ptrInfo._sizeBytes != 0, ptrInfo._isInDeviceMem, !ptrInfo._isAmManaged,
ptrInfo._allocSeqNum, ptrInfo._appAllocationFlags, ptrInfo._appPtr);
}
// the pointer-info as returned by HC refers to the allocation
// This routine modifies the pointer-info so it appears to refer to the specific ptr and sizeBytes.
// TODO -remove this when HCC uses HSA pointer info functions directly.
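// Worked example (hypothetical addresses, for illustration only): if a tracked device
// allocation has _devicePointer=0x1000 and _sizeBytes=4096, then tailoring it for ptr=0x1040
// and sizeBytes=64 rewrites the info to _devicePointer=0x1040 and _sizeBytes=64, shifting
// _hostPointer (if present) by the same 0x40 offset.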
void tailorPtrInfo(hc::AmPointerInfo* ptrInfo, const void* ptr, size_t sizeBytes) {
const char* ptrc = static_cast<const char*>(ptr);
if (ptrInfo->_sizeBytes == 0) {
// invalid ptrInfo, don't modify
return;
} else if (ptrInfo->_isInDeviceMem) {
assert(ptrInfo->_devicePointer != nullptr);
std::ptrdiff_t diff = ptrc - static_cast<const char*>(ptrInfo->_devicePointer);
// TODO : assert-> runtime assert that only appears in debug mode
assert(diff >= 0);
assert(diff <= ptrInfo->_sizeBytes);
ptrInfo->_devicePointer = const_cast<void*>(ptr);
if (ptrInfo->_hostPointer != nullptr) {
ptrInfo->_hostPointer = static_cast<char*>(ptrInfo->_hostPointer) + diff;
}
} else {
assert(ptrInfo->_hostPointer != nullptr);
std::ptrdiff_t diff = ptrc - static_cast<const char*>(ptrInfo->_hostPointer);
// TODO : assert-> runtime assert that only appears in debug mode
assert(diff >= 0);
assert(diff <= ptrInfo->_sizeBytes);
ptrInfo->_hostPointer = const_cast<void*>(ptr);
if (ptrInfo->_devicePointer != nullptr) {
ptrInfo->_devicePointer = static_cast<char*>(ptrInfo->_devicePointer) + diff;
}
}
assert(sizeBytes <= ptrInfo->_sizeBytes);
ptrInfo->_sizeBytes = sizeBytes;
};
bool getTailoredPtrInfo(const char* tag, hc::AmPointerInfo* ptrInfo, const void* ptr,
size_t sizeBytes) {
bool tracked = (hc::am_memtracker_getinfo(ptrInfo, ptr) == AM_SUCCESS);
printPointerInfo(DB_COPY, tag, ptr, *ptrInfo);
if (tracked) {
tailorPtrInfo(ptrInfo, ptr, sizeBytes);
printPointerInfo(DB_COPY, " mod", ptr, *ptrInfo);
}
return tracked;
};
// TODO : For registered and host memory, if the portable flag is set, we need to recognize that and
// perform appropriate copy operation. What can happen now is that Portable memory is mapped into
// multiple devices but Peer access is not enabled. The peer detection logic doesn't see that the
// memory is already mapped and so tries to use an unpinned copy algorithm. If this is PinInPlace,
// then an error can occur. Need to track Portable flag correctly or use new RT functionality to
// query the peer status for the pointer.
//
// TODO - remove kind parm from here or use it below?
void ihipStream_t::locked_copySync(void* dst, const void* src, size_t sizeBytes, unsigned kind,
bool resolveOn) {
ihipCtx_t* ctx = this->getCtx();
const ihipDevice_t* device = ctx->getDevice();
if (device == NULL) {
throw ihipException(hipErrorInvalidDevice);
}
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
// Some code in HCC and in printPointerInfo uses _sizeBytes==0 as an indication ptr is not
// valid, so check it here:
if (!dstTracked) {
assert(dstPtrInfo._sizeBytes == 0);
}
if (!srcTracked) {
assert(srcPtrInfo._sizeBytes == 0);
}
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
{
LockedAccessor_StreamCrit_t crit(_criticalData);
tprintf(DB_COPY,
"copySync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) src=%p(phys_dev:%d, "
"isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId,
dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId, srcPtrInfo._isInDeviceMem,
sizeBytes, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy);
printPointerInfo(DB_COPY, " dst", dst, dstPtrInfo);
printPointerInfo(DB_COPY, " src", src, srcPtrInfo);
crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
}
}
bool ihipStream_t::locked_copy2DSync(void* dst, const void* src, size_t width, size_t height, size_t srcPitch, size_t dstPitch, unsigned kind,
bool resolveOn) {
bool retStatus = true;
ihipCtx_t* ctx = this->getCtx();
const ihipDevice_t* device = ctx->getDevice();
if (device == NULL) {
throw ihipException(hipErrorInvalidDevice);
}
size_t sizeBytes = width*height;
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
// Some code in HCC and in printPointerInfo uses _sizeBytes==0 as an indication ptr is not
// valid, so check it here:
if (!dstTracked) {
assert(dstPtrInfo._sizeBytes == 0);
}
if (!srcTracked) {
assert(srcPtrInfo._sizeBytes == 0);
}
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
{
LockedAccessor_StreamCrit_t crit(_criticalData);
tprintf(DB_COPY,
"copy2DSync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) src=%p(phys_dev:%d, "
"isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId,
dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId, srcPtrInfo._isInDeviceMem,
sizeBytes, hcMemcpyStr(hcCopyDir), forceUnpinnedCopy);
printPointerInfo(DB_COPY, " dst", dst, dstPtrInfo);
printPointerInfo(DB_COPY, " src", src, srcPtrInfo);
#if (__hcc_workweek__ >= 19101)
if(!crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy)) {
tprintf(DB_COPY,"locked_copy2DSync failed to use SDMA\n");
retStatus = false;
}
#else
crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
#endif
}
return retStatus;
}
void ihipStream_t::addSymbolPtrToTracker(hc::accelerator& acc, void* ptr, size_t sizeBytes) {
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo ptrInfo(NULL, ptr, ptr, sizeBytes, acc, true, false);
#else
hc::AmPointerInfo ptrInfo(NULL, ptr, sizeBytes, acc, true, false);
#endif
hc::am_memtracker_add(ptr, ptrInfo);
}
void ihipStream_t::lockedSymbolCopySync(hc::accelerator& acc, void* dst, void* src,
size_t sizeBytes, size_t offset, unsigned kind) {
if (kind == hipMemcpyHostToHost) {
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset, Kalmar::hcMemcpyHostToHost);
}
if (kind == hipMemcpyHostToDevice) {
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset);
}
if (kind == hipMemcpyDeviceToDevice) {
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset, Kalmar::hcMemcpyDeviceToDevice);
}
if (kind == hipMemcpyDeviceToHost) {
acc.memcpy_symbol((void*)src, (void*)dst, sizeBytes, offset, Kalmar::hcMemcpyDeviceToHost);
}
}
void ihipStream_t::lockedSymbolCopyAsync(hc::accelerator& acc, void* dst, void* src,
size_t sizeBytes, size_t offset, unsigned kind) {
// TODO - review - this looks broken, should not be adding pointers to the tracker dynamically:
if (kind == hipMemcpyHostToDevice) {
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool srcTracked = (hc::am_memtracker_getinfo(&srcPtrInfo, src) == AM_SUCCESS);
if (srcTracked) {
addSymbolPtrToTracker(acc, dst, sizeBytes);
locked_getAv()->copy_async((void*)src, dst, sizeBytes);
} else {
LockedAccessor_StreamCrit_t crit(_criticalData);
this->wait(crit);
acc.memcpy_symbol(dst, (void*)src, sizeBytes, offset);
}
}
if (kind == hipMemcpyDeviceToHost) {
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
bool dstTracked = (hc::am_memtracker_getinfo(&dstPtrInfo, dst) == AM_SUCCESS);
if (dstTracked) {
addSymbolPtrToTracker(acc, src, sizeBytes);
locked_getAv()->copy_async((void*)src, dst, sizeBytes);
} else {
LockedAccessor_StreamCrit_t crit(_criticalData);
this->wait(crit);
acc.memcpy_symbol((void*)src, (void*)dst, sizeBytes, offset,
Kalmar::hcMemcpyDeviceToHost);
}
}
}
void ihipStream_t::locked_copyAsync(void* dst, const void* src, size_t sizeBytes, unsigned kind) {
const ihipCtx_t* ctx = this->getCtx();
if ((ctx == nullptr) || (ctx->getDevice() == nullptr)) {
tprintf(DB_COPY, "locked_copyAsync bad ctx or device\n");
throw ihipException(hipErrorInvalidDevice);
}
if (kind == hipMemcpyHostToHost) {
tprintf(DB_COPY, "locked_copyAsync: H2H with memcpy");
// TODO - consider if we want to perhaps use the GPU SDMA engines anyway, to avoid the
// host-side sync here and keep everything flowing on the GPU.
/* As this is a CPU op, we need to wait until all
the commands in current stream are finished.
*/
LockedAccessor_StreamCrit_t crit(_criticalData);
this->wait(crit);
memcpy(dst, src, sizeBytes);
} else {
hc::accelerator acc;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
tprintf(DB_COPY, "copyASync dst=%p src=%p, sz=%zu\n", dst, src, sizeBytes);
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
tprintf(DB_COPY, " copyDev:%d dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, hcMemcpyStr(hcCopyDir),
forceUnpinnedCopy);
// "tracked" really indicates if the pointer's virtual address is available in the GPU
// address space. If both pointers are not tracked, we need to fall back to a sync copy.
if (dstTracked && srcTracked && !forceUnpinnedCopy &&
copyDevice /*code below assumes this is !nullptr*/) {
LockedAccessor_StreamCrit_t crit(_criticalData);
// Perform fast asynchronous copy - we know copyDevice != NULL based on check above
try {
if (HIP_FORCE_SYNC_COPY) {
crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc, forceUnpinnedCopy);
} else {
crit->_av.copy_async_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc);
}
} catch (Kalmar::runtime_exception) {
throw ihipException(hipErrorRuntimeOther);
};
if (HIP_API_BLOCKING) {
tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for completion of hipMemcpyAsync(sz=%zu)\n",
ToString(this).c_str(), sizeBytes);
this->wait(crit);
}
} else {
if (HIP_FAIL_SOC & 0x1) {
fprintf(stderr,
"HIP_FAIL_SOC failed, async_copy requested but could not be completed "
"since src or dst not accesible to copy agent\n");
fprintf(stderr,
"copyASync copyDev:%d dst=%p (phys_dev:%d, isDevMem:%d) "
"src=%p(phys_dev:%d, isDevMem:%d) sz=%zu dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, dst, dstPtrInfo._appId,
dstPtrInfo._isInDeviceMem, src, srcPtrInfo._appId,
srcPtrInfo._isInDeviceMem, sizeBytes, hcMemcpyStr(hcCopyDir),
forceUnpinnedCopy);
fprintf(
stderr,
" dst=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d\n",
dst, dstPtrInfo._hostPointer, dstPtrInfo._devicePointer, dstPtrInfo._sizeBytes,
dstPtrInfo._appId, dstTracked, dstPtrInfo._isInDeviceMem);
fprintf(
stderr,
" src=%p baseHost=%p baseDev=%p sz=%zu home_dev=%d tracked=%d isDevMem=%d\n",
src, srcPtrInfo._hostPointer, srcPtrInfo._devicePointer, srcPtrInfo._sizeBytes,
srcPtrInfo._appId, srcTracked, srcPtrInfo._isInDeviceMem);
throw ihipException(hipErrorRuntimeOther);
}
// Perform slow synchronous copy:
LockedAccessor_StreamCrit_t crit(_criticalData);
crit->_av.copy_ext(src, dst, sizeBytes, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
}
}
}
bool ihipStream_t::locked_copy2DAsync(void* dst, const void* src, size_t width, size_t height, size_t srcPitch, size_t dstPitch, unsigned kind)
{
bool retStatus = true;
const ihipCtx_t* ctx = this->getCtx();
if ((ctx == nullptr) || (ctx->getDevice() == nullptr)) {
tprintf(DB_COPY, "locked_copy2DAsync bad ctx or device\n");
throw ihipException(hipErrorInvalidDevice);
}
hc::accelerator acc;
size_t sizeBytes = width*height;
#if (__hcc_workweek__ >= 17332)
hc::AmPointerInfo dstPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, NULL, 0, acc, 0, 0);
#else
hc::AmPointerInfo dstPtrInfo(NULL, NULL, 0, acc, 0, 0);
hc::AmPointerInfo srcPtrInfo(NULL, NULL, 0, acc, 0, 0);
#endif
tprintf(DB_COPY, "copy2DAsync dst=%p src=%p, sz=%zu\n", dst, src, sizeBytes);
bool dstTracked = getTailoredPtrInfo(" dst", &dstPtrInfo, dst, sizeBytes);
bool srcTracked = getTailoredPtrInfo(" src", &srcPtrInfo, src, sizeBytes);
hc::hcCommandKind hcCopyDir;
ihipCtx_t* copyDevice;
bool forceUnpinnedCopy;
resolveHcMemcpyDirection(kind, &dstPtrInfo, &srcPtrInfo, &hcCopyDir, ©Device,
&forceUnpinnedCopy);
tprintf(DB_COPY, " copyDev:%d dir=%s forceUnpinnedCopy=%d\n",
copyDevice ? copyDevice->getDeviceNum() : -1, hcMemcpyStr(hcCopyDir),
forceUnpinnedCopy);
if (dstTracked && srcTracked && !forceUnpinnedCopy &&
copyDevice /*code below assumes this is !nullptr*/) {
LockedAccessor_StreamCrit_t crit(_criticalData);
try {
if (HIP_FORCE_SYNC_COPY) {
#if (__hcc_workweek__ >= 19101)
if(!crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc,
forceUnpinnedCopy)){
tprintf(DB_COPY,"locked_copy2DASync with HIP_FORCE_SYNC_COPY failed to use SDMA\n");
retStatus = false;
}
#else
crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc,
forceUnpinnedCopy);
#endif
} else {
const auto& future = crit->_av.copy2d_async_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
©Device->getDevice()->_acc);
if(!future.valid()) {
tprintf(DB_COPY, "locked_copy2DAsync failed to use SDMA\n");
retStatus = false;
}
}
} catch (Kalmar::runtime_exception) {
throw ihipException(hipErrorRuntimeOther);
};
if (HIP_API_BLOCKING) {
tprintf(DB_SYNC, "%s LAUNCH_BLOCKING for completion of hipMemcpy2DAsync(sz=%zu)\n",
ToString(this).c_str(), sizeBytes);
this->wait(crit);
}
} else {
//Do sync 2D copy
LockedAccessor_StreamCrit_t crit(_criticalData);
#if (__hcc_workweek__ >= 19101)
if(!crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy)){
tprintf(DB_COPY, "locked_copy2DAsync Sync copy failed to use SDMA\n");
retStatus = false;
}
#else
crit->_av.copy2d_ext(src, dst, width, height, srcPitch, dstPitch, hcCopyDir, srcPtrInfo, dstPtrInfo,
copyDevice ? ©Device->getDevice()->_acc : nullptr,
forceUnpinnedCopy);
#endif
}
return retStatus;
}
//-------------------------------------------------------------------------------------------------
//-------------------------------------------------------------------------------------------------
// Profiler, really these should live elsewhere:
hipError_t hipProfilerStart() {
HIP_INIT_API(hipProfilerStart);
#if COMPILE_HIP_ATP_MARKER
amdtResumeProfiling(AMDT_ALL_PROFILING);
#endif
return ihipLogStatus(hipSuccess);
};
hipError_t hipProfilerStop() {
HIP_INIT_API(hipProfilerStop);
#if COMPILE_HIP_ATP_MARKER
amdtStopProfiling(AMDT_ALL_PROFILING);
#endif
return ihipLogStatus(hipSuccess);
};
//-------------------------------------------------------------------------------------------------
//-------------------------------------------------------------------------------------------------
// HCC-specific accessor functions:
//---
hipError_t hipHccGetAccelerator(int deviceId, hc::accelerator* acc) {
HIP_INIT_API(hipHccGetAccelerator, deviceId, acc);
const ihipDevice_t* device = ihipGetDevice(deviceId);
hipError_t err;
if (device == NULL) {
err = hipErrorInvalidDevice;
} else {
*acc = device->_acc;
err = hipSuccess;
}
return ihipLogStatus(err);
}
//---
hipError_t hipHccGetAcceleratorView(hipStream_t stream, hc::accelerator_view** av) {
HIP_INIT_API(hipHccGetAcceleratorView, stream, av);
if (stream == hipStreamNull) {
ihipCtx_t* device = ihipGetTlsDefaultCtx();
stream = device->_defaultStream;
}
*av = stream->locked_getAv(); // TODO - review.
hipError_t err = hipSuccess;
return ihipLogStatus(err);
}
//// TODO - add identifier numbers for streams and devices to help with debugging.
// TODO - add a context sequence number for debug. Print operator<< ctx:0.1 (device.ctx)
namespace hip_impl {
std::vector<hsa_agent_t> all_hsa_agents() {
std::vector<hsa_agent_t> r{};
for (auto&& acc : hc::accelerator::get_all()) {
const auto agent = acc.get_hsa_agent();
if (!agent || !acc.is_hsa_accelerator()) continue;
r.emplace_back(*static_cast<hsa_agent_t*>(agent));
}
return r;
}
[[noreturn]]
void hip_throw(const std::exception& ex) {
#if defined(__cpp_exceptions)
throw ex;
#else
std::cerr << ex.what() << std::endl;
std::terminate();
#endif
}
} // Namespace hip_impl.
| 1 | 7,549 | Since the lock has been moved outside of this function, I think it would be simpler and more efficient to just return a ref to the std::vector<hsa_executable_t>? That way, we don't need to make a new copy on read and we won't actually need a write operation. | ROCm-Developer-Tools-HIP | cpp |
@@ -448,12 +448,15 @@ func (api *Server) ReadState(ctx context.Context, in *iotexapi.ReadStateRequest)
if !ok {
return nil, status.Errorf(codes.Internal, "protocol %s isn't registered", string(in.ProtocolID))
}
- data, err := api.readState(ctx, p, in.GetHeight(), in.MethodName, in.Arguments...)
+ data, readStateHeight, err := api.readState(ctx, p, in.GetHeight(), in.MethodName, in.Arguments...)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
out := iotexapi.ReadStateResponse{
Data: data,
+ BlockIdentifier: &iotextypes.BlockIdentifier{
+ Height: readStateHeight,
+ },
}
return &out, nil
} | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package api
import (
"bytes"
"context"
"encoding/hex"
"math"
"math/big"
"net"
"strconv"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-election/committee"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/blockdao"
"github.com/iotexproject/iotex-core/blockindex"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/gasstation"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/version"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
)
var (
// ErrInternalServer indicates the internal server error
ErrInternalServer = errors.New("internal server error")
// ErrReceipt indicates the error of receipt
ErrReceipt = errors.New("invalid receipt")
// ErrAction indicates the error of action
ErrAction = errors.New("invalid action")
)
// BroadcastOutbound sends a broadcast message to the whole network
type BroadcastOutbound func(ctx context.Context, chainID uint32, msg proto.Message) error
// Config represents the config to setup api
type Config struct {
broadcastHandler BroadcastOutbound
electionCommittee committee.Committee
}
// Option is the option to override the api config
type Option func(cfg *Config) error
// WithBroadcastOutbound is the option to broadcast msg outbound
func WithBroadcastOutbound(broadcastHandler BroadcastOutbound) Option {
return func(cfg *Config) error {
cfg.broadcastHandler = broadcastHandler
return nil
}
}
// WithNativeElection is the option to return native election data through API.
func WithNativeElection(committee committee.Committee) Option {
return func(cfg *Config) error {
cfg.electionCommittee = committee
return nil
}
}
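// Usage sketch (illustrative only; broadcastHandler and nativeCommittee are assumed to be
// provided by the caller): options are applied variadically when constructing the server, e.g.
//	svr, err := NewServer(cfg, chain, sf, dao, indexer, actPool, registry,
//		WithBroadcastOutbound(broadcastHandler),
//		WithNativeElection(nativeCommittee),
//	)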
// Server provides api for user to query blockchain data
type Server struct {
bc blockchain.Blockchain
sf factory.Factory
dao blockdao.BlockDAO
indexer blockindex.Indexer
ap actpool.ActPool
gs *gasstation.GasStation
broadcastHandler BroadcastOutbound
cfg config.Config
registry *protocol.Registry
chainListener Listener
grpcServer *grpc.Server
hasActionIndex bool
electionCommittee committee.Committee
}
// NewServer creates a new server
func NewServer(
cfg config.Config,
chain blockchain.Blockchain,
sf factory.Factory,
dao blockdao.BlockDAO,
indexer blockindex.Indexer,
actPool actpool.ActPool,
registry *protocol.Registry,
opts ...Option,
) (*Server, error) {
apiCfg := Config{}
for _, opt := range opts {
if err := opt(&apiCfg); err != nil {
return nil, err
}
}
if cfg.API == (config.API{}) {
log.L().Warn("API server is not configured.")
cfg.API = config.Default.API
}
if cfg.API.RangeQueryLimit < uint64(cfg.API.TpsWindow) {
return nil, errors.New("range query upper limit cannot be less than tps window")
}
svr := &Server{
bc: chain,
sf: sf,
dao: dao,
indexer: indexer,
ap: actPool,
broadcastHandler: apiCfg.broadcastHandler,
cfg: cfg,
registry: registry,
chainListener: NewChainListener(),
gs: gasstation.NewGasStation(chain, sf.SimulateExecution, dao, cfg.API),
electionCommittee: apiCfg.electionCommittee,
}
if _, ok := cfg.Plugins[config.GatewayPlugin]; ok {
svr.hasActionIndex = true
}
svr.grpcServer = grpc.NewServer(
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
)
iotexapi.RegisterAPIServiceServer(svr.grpcServer, svr)
grpc_prometheus.Register(svr.grpcServer)
reflection.Register(svr.grpcServer)
return svr, nil
}
// GetAccount returns the metadata of an account
func (api *Server) GetAccount(ctx context.Context, in *iotexapi.GetAccountRequest) (*iotexapi.GetAccountResponse, error) {
state, tipHeight, err := accountutil.AccountStateWithHeight(api.sf, in.Address)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
pendingNonce, err := api.ap.GetPendingNonce(in.Address)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if api.indexer == nil {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
addr, err := address.FromString(in.Address)
if err != nil {
return nil, err
}
numActions, err := api.indexer.GetActionCountByAddress(hash.BytesToHash160(addr.Bytes()))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
accountMeta := &iotextypes.AccountMeta{
Address: in.Address,
Balance: state.Balance.String(),
Nonce: state.Nonce,
PendingNonce: pendingNonce,
NumActions: numActions,
}
header, err := api.bc.BlockHeaderByHeight(tipHeight)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
hash := header.HashBlock()
return &iotexapi.GetAccountResponse{AccountMeta: accountMeta, BlockIdentifier: &iotextypes.BlockIdentifier{
Hash: hex.EncodeToString(hash[:]),
Height: tipHeight,
}}, nil
}
// GetActions returns actions
func (api *Server) GetActions(ctx context.Context, in *iotexapi.GetActionsRequest) (*iotexapi.GetActionsResponse, error) {
if (!api.hasActionIndex || api.indexer == nil) && (in.GetByHash() != nil || in.GetByAddr() != nil) {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
switch {
case in.GetByIndex() != nil:
request := in.GetByIndex()
return api.getActions(request.Start, request.Count)
case in.GetByHash() != nil:
request := in.GetByHash()
return api.getSingleAction(request.ActionHash, request.CheckPending)
case in.GetByAddr() != nil:
request := in.GetByAddr()
return api.getActionsByAddress(request.Address, request.Start, request.Count)
case in.GetUnconfirmedByAddr() != nil:
request := in.GetUnconfirmedByAddr()
return api.getUnconfirmedActionsByAddress(request.Address, request.Start, request.Count)
case in.GetByBlk() != nil:
request := in.GetByBlk()
return api.getActionsByBlock(request.BlkHash, request.Start, request.Count)
default:
return nil, status.Error(codes.NotFound, "invalid GetActionsRequest type")
}
}
// GetBlockMetas returns block metadata
func (api *Server) GetBlockMetas(ctx context.Context, in *iotexapi.GetBlockMetasRequest) (*iotexapi.GetBlockMetasResponse, error) {
switch {
case in.GetByIndex() != nil:
request := in.GetByIndex()
return api.getBlockMetas(request.Start, request.Count)
case in.GetByHash() != nil:
request := in.GetByHash()
return api.getBlockMeta(request.BlkHash)
default:
return nil, status.Error(codes.NotFound, "invalid GetBlockMetasRequest type")
}
}
// GetChainMeta returns blockchain metadata
func (api *Server) GetChainMeta(ctx context.Context, in *iotexapi.GetChainMetaRequest) (*iotexapi.GetChainMetaResponse, error) {
tipHeight := api.bc.TipHeight()
if tipHeight == 0 {
return &iotexapi.GetChainMetaResponse{
ChainMeta: &iotextypes.ChainMeta{
Epoch: &iotextypes.EpochData{},
},
}, nil
}
if api.indexer == nil {
// TODO: in case indexer does not exist, may consider returning a value like 0 instead of exiting
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
totalActions, err := api.indexer.GetTotalActions()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
blockLimit := int64(api.cfg.API.TpsWindow)
if blockLimit <= 0 {
return nil, status.Errorf(codes.Internal, "block limit is %d", blockLimit)
}
// avoid genesis block
if int64(tipHeight) < blockLimit {
blockLimit = int64(tipHeight)
}
r, err := api.getBlockMetas(tipHeight-uint64(blockLimit)+1, uint64(blockLimit))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blks := r.BlkMetas
if len(blks) == 0 {
return nil, status.Error(codes.NotFound, "get 0 blocks! not able to calculate aps")
}
var numActions int64
for _, blk := range blks {
numActions += blk.NumActions
}
t1 := time.Unix(blks[0].Timestamp.GetSeconds(), int64(blks[0].Timestamp.GetNanos()))
t2 := time.Unix(blks[len(blks)-1].Timestamp.GetSeconds(), int64(blks[len(blks)-1].Timestamp.GetNanos()))
// duration of time difference in milli-seconds
// TODO: use config.Genesis.BlockInterval after PR1289 merges
timeDiff := (t2.Sub(t1) + 10*time.Second) / time.Millisecond
tps := float32(numActions*1000) / float32(timeDiff)
chainMeta := &iotextypes.ChainMeta{
Height: tipHeight,
NumActions: int64(totalActions),
Tps: int64(math.Ceil(float64(tps))),
TpsFloat: tps,
}
rp := rolldpos.FindProtocol(api.registry)
if rp != nil {
epochNum := rp.GetEpochNum(tipHeight)
epochHeight := rp.GetEpochHeight(epochNum)
gravityChainStartHeight, err := api.getGravityChainStartHeight(epochHeight)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
chainMeta.Epoch = &iotextypes.EpochData{
Num: epochNum,
Height: epochHeight,
GravityChainStartHeight: gravityChainStartHeight,
}
}
return &iotexapi.GetChainMetaResponse{ChainMeta: chainMeta}, nil
}
// GetServerMeta gets the server metadata
func (api *Server) GetServerMeta(ctx context.Context,
in *iotexapi.GetServerMetaRequest) (*iotexapi.GetServerMetaResponse, error) {
return &iotexapi.GetServerMetaResponse{ServerMeta: &iotextypes.ServerMeta{
PackageVersion: version.PackageVersion,
PackageCommitID: version.PackageCommitID,
GitStatus: version.GitStatus,
GoVersion: version.GoVersion,
BuildTime: version.BuildTime,
}}, nil
}
// SendAction is the API to send an action to blockchain.
func (api *Server) SendAction(ctx context.Context, in *iotexapi.SendActionRequest) (*iotexapi.SendActionResponse, error) {
log.L().Debug("receive send action request")
var selp action.SealedEnvelope
var err error
if err = selp.LoadProto(in.Action); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
// Add to local actpool
ctx = protocol.WithRegistry(ctx, api.registry)
if err = api.ap.Add(ctx, selp); err != nil {
log.L().Debug(err.Error())
var desc string
switch errors.Cause(err) {
case action.ErrBalance:
desc = "Invalid balance"
case action.ErrInsufficientBalanceForGas:
desc = "Insufficient balance for gas"
case action.ErrNonce:
desc = "Invalid nonce"
case action.ErrAddress:
desc = "Blacklisted address"
case action.ErrActPool:
desc = "Invalid actpool"
case action.ErrGasPrice:
desc = "Invalid gas price"
default:
desc = "Unknown"
}
st := status.New(codes.Internal, err.Error())
v := &errdetails.BadRequest_FieldViolation{
Field: "Action rejected",
Description: desc,
}
br := &errdetails.BadRequest{}
br.FieldViolations = append(br.FieldViolations, v)
st, err := st.WithDetails(br)
if err != nil {
log.S().Panicf("Unexpected error attaching metadata: %v", err)
}
return nil, st.Err()
}
// If there is no error putting into local actpool,
// Broadcast it to the network
if err = api.broadcastHandler(context.Background(), api.bc.ChainID(), in.Action); err != nil {
log.L().Warn("Failed to broadcast SendAction request.", zap.Error(err))
}
hash := selp.Hash()
return &iotexapi.SendActionResponse{ActionHash: hex.EncodeToString(hash[:])}, nil
}
// GetReceiptByAction gets receipt with corresponding action hash
func (api *Server) GetReceiptByAction(ctx context.Context, in *iotexapi.GetReceiptByActionRequest) (*iotexapi.GetReceiptByActionResponse, error) {
if !api.hasActionIndex || api.indexer == nil {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
actHash, err := hash.HexStringToHash256(in.ActionHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
receipt, err := api.GetReceiptByActionHash(actHash)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blkHash, err := api.getBlockHashByActionHash(actHash)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
return &iotexapi.GetReceiptByActionResponse{
ReceiptInfo: &iotexapi.ReceiptInfo{
Receipt: receipt.ConvertToReceiptPb(),
BlkHash: hex.EncodeToString(blkHash[:]),
},
}, nil
}
// ReadContract reads the state in a contract address specified by the slot
func (api *Server) ReadContract(ctx context.Context, in *iotexapi.ReadContractRequest) (*iotexapi.ReadContractResponse, error) {
log.L().Debug("receive read smart contract request")
sc := &action.Execution{}
if err := sc.LoadProto(in.Execution); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
state, err := accountutil.AccountState(api.sf, in.CallerAddress)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
sc, _ = action.NewExecution(
sc.Contract(),
state.Nonce+1,
sc.Amount(),
api.cfg.Genesis.BlockGasLimit,
big.NewInt(0),
sc.Data(),
)
callerAddr, err := address.FromString(in.CallerAddress)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
ctx, err = api.bc.Context()
if err != nil {
return nil, err
}
retval, receipt, err := api.sf.SimulateExecution(ctx, callerAddr, sc, api.dao.GetBlockHash)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.ReadContractResponse{
Data: hex.EncodeToString(retval),
Receipt: receipt.ConvertToReceiptPb(),
}, nil
}
// ReadState reads state on blockchain
func (api *Server) ReadState(ctx context.Context, in *iotexapi.ReadStateRequest) (*iotexapi.ReadStateResponse, error) {
p, ok := api.registry.Find(string(in.ProtocolID))
if !ok {
return nil, status.Errorf(codes.Internal, "protocol %s isn't registered", string(in.ProtocolID))
}
data, err := api.readState(ctx, p, in.GetHeight(), in.MethodName, in.Arguments...)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
out := iotexapi.ReadStateResponse{
Data: data,
}
return &out, nil
}
// SuggestGasPrice suggests gas price
func (api *Server) SuggestGasPrice(ctx context.Context, in *iotexapi.SuggestGasPriceRequest) (*iotexapi.SuggestGasPriceResponse, error) {
suggestPrice, err := api.gs.SuggestGasPrice()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.SuggestGasPriceResponse{GasPrice: suggestPrice}, nil
}
// EstimateGasForAction estimates gas for action
func (api *Server) EstimateGasForAction(ctx context.Context, in *iotexapi.EstimateGasForActionRequest) (*iotexapi.EstimateGasForActionResponse, error) {
estimateGas, err := api.gs.EstimateGasForAction(in.Action)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.EstimateGasForActionResponse{Gas: estimateGas}, nil
}
// EstimateActionGasConsumption estimates gas consumption for an action without a signature
func (api *Server) EstimateActionGasConsumption(ctx context.Context, in *iotexapi.EstimateActionGasConsumptionRequest) (respone *iotexapi.EstimateActionGasConsumptionResponse, err error) {
respone = &iotexapi.EstimateActionGasConsumptionResponse{}
switch {
case in.GetExecution() != nil:
request := in.GetExecution()
return api.estimateActionGasConsumptionForExecution(request, in.GetCallerAddress())
case in.GetTransfer() != nil:
respone.Gas = uint64(len(in.GetTransfer().Payload))*action.TransferPayloadGas + action.TransferBaseIntrinsicGas
case in.GetStakeCreate() != nil:
respone.Gas = uint64(len(in.GetStakeCreate().Payload))*action.CreateStakePayloadGas + action.CreateStakeBaseIntrinsicGas
case in.GetStakeUnstake() != nil:
respone.Gas = uint64(len(in.GetStakeUnstake().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas
case in.GetStakeWithdraw() != nil:
respone.Gas = uint64(len(in.GetStakeWithdraw().Payload))*action.ReclaimStakePayloadGas + action.ReclaimStakeBaseIntrinsicGas
case in.GetStakeAddDeposit() != nil:
respone.Gas = uint64(len(in.GetStakeAddDeposit().Payload))*action.DepositToStakePayloadGas + action.DepositToStakeBaseIntrinsicGas
case in.GetStakeRestake() != nil:
respone.Gas = uint64(len(in.GetStakeRestake().Payload))*action.RestakePayloadGas + action.RestakeBaseIntrinsicGas
case in.GetStakeChangeCandidate() != nil:
respone.Gas = uint64(len(in.GetStakeChangeCandidate().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas
case in.GetStakeTransferOwnership() != nil:
respone.Gas = uint64(len(in.GetStakeTransferOwnership().Payload))*action.MoveStakePayloadGas + action.MoveStakeBaseIntrinsicGas
case in.GetCandidateRegister() != nil:
respone.Gas = uint64(len(in.GetCandidateRegister().Payload))*action.CandidateRegisterPayloadGas + action.CandidateRegisterBaseIntrinsicGas
case in.GetCandidateUpdate() != nil:
respone.Gas = action.CandidateUpdateBaseIntrinsicGas
default:
return nil, status.Error(codes.InvalidArgument, "invalid argument")
}
return
}
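// For illustration (the 100-byte payload is an arbitrary example): a plain transfer carrying a
// 100-byte payload is estimated above as
//	gas = 100*action.TransferPayloadGas + action.TransferBaseIntrinsicGas
// i.e. a per-payload-byte cost on top of the action's base intrinsic gas; the constant values
// themselves are defined in the action package.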
// GetEpochMeta gets epoch metadata
func (api *Server) GetEpochMeta(
ctx context.Context,
in *iotexapi.GetEpochMetaRequest,
) (*iotexapi.GetEpochMetaResponse, error) {
rp := rolldpos.FindProtocol(api.registry)
if rp == nil {
return &iotexapi.GetEpochMetaResponse{}, nil
}
if in.EpochNumber < 1 {
return nil, status.Error(codes.InvalidArgument, "epoch number cannot be less than one")
}
epochHeight := rp.GetEpochHeight(in.EpochNumber)
gravityChainStartHeight, err := api.getGravityChainStartHeight(epochHeight)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
epochData := &iotextypes.EpochData{
Num: in.EpochNumber,
Height: epochHeight,
GravityChainStartHeight: gravityChainStartHeight,
}
pp := poll.FindProtocol(api.registry)
if pp == nil {
return nil, status.Error(codes.Internal, "poll protocol is not registered")
}
methodName := []byte("ActiveBlockProducersByEpoch")
arguments := [][]byte{[]byte(strconv.FormatUint(in.EpochNumber, 10))}
height := strconv.FormatUint(epochHeight, 10)
data, err := api.readState(context.Background(), pp, height, methodName, arguments...)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
var activeConsensusBlockProducers state.CandidateList
if err := activeConsensusBlockProducers.Deserialize(data); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
numBlks, produce, err := api.getProductivityByEpoch(rp, in.EpochNumber, api.bc.TipHeight(), activeConsensusBlockProducers)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
methodName = []byte("BlockProducersByEpoch")
data, err = api.readState(context.Background(), pp, height, methodName, arguments...)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
var BlockProducers state.CandidateList
if err := BlockProducers.Deserialize(data); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
var blockProducersInfo []*iotexapi.BlockProducerInfo
for _, bp := range BlockProducers {
var active bool
var blockProduction uint64
if production, ok := produce[bp.Address]; ok {
active = true
blockProduction = production
}
blockProducersInfo = append(blockProducersInfo, &iotexapi.BlockProducerInfo{
Address: bp.Address,
Votes: bp.Votes.String(),
Active: active,
Production: blockProduction,
})
}
return &iotexapi.GetEpochMetaResponse{
EpochData: epochData,
TotalBlocks: numBlks,
BlockProducersInfo: blockProducersInfo,
}, nil
}
// GetRawBlocks gets raw block data
func (api *Server) GetRawBlocks(
ctx context.Context,
in *iotexapi.GetRawBlocksRequest,
) (*iotexapi.GetRawBlocksResponse, error) {
if in.Count == 0 || in.Count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
tipHeight := api.bc.TipHeight()
if in.StartHeight > tipHeight {
return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height")
}
var res []*iotexapi.BlockInfo
for height := int(in.StartHeight); height <= int(tipHeight); height++ {
if uint64(len(res)) >= in.Count {
break
}
blk, err := api.dao.GetBlockByHeight(uint64(height))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
var receiptsPb []*iotextypes.Receipt
if in.WithReceipts {
receipts, err := api.dao.GetReceipts(uint64(height))
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
for _, receipt := range receipts {
receiptsPb = append(receiptsPb, receipt.ConvertToReceiptPb())
}
}
res = append(res, &iotexapi.BlockInfo{
Block: blk.ConvertToBlockPb(),
Receipts: receiptsPb,
})
}
return &iotexapi.GetRawBlocksResponse{Blocks: res}, nil
}
// GetLogs get logs filtered by contract address and topics
func (api *Server) GetLogs(
ctx context.Context,
in *iotexapi.GetLogsRequest,
) (*iotexapi.GetLogsResponse, error) {
switch {
case in.GetByBlock() != nil:
req := in.GetByBlock()
h, err := api.dao.GetBlockHeight(hash.BytesToHash256(req.BlockHash))
if err != nil {
return nil, status.Error(codes.InvalidArgument, "invalid block hash")
}
filter, ok := NewLogFilter(in.Filter, nil, nil).(*LogFilter)
if !ok {
return nil, status.Error(codes.Internal, "cannot convert to *LogFilter")
}
logs, err := api.getLogsInBlock(filter, h, 1)
return &iotexapi.GetLogsResponse{Logs: logs}, err
case in.GetByRange() != nil:
req := in.GetByRange()
if req.FromBlock > api.bc.TipHeight() {
return nil, status.Error(codes.InvalidArgument, "start block > tip height")
}
filter, ok := NewLogFilter(in.Filter, nil, nil).(*LogFilter)
if !ok {
return nil, status.Error(codes.Internal, "cannot convert to *LogFilter")
}
logs, err := api.getLogsInBlock(filter, req.FromBlock, req.Count)
return &iotexapi.GetLogsResponse{Logs: logs}, err
default:
return nil, status.Error(codes.InvalidArgument, "invalid GetLogsRequest type")
}
}
// StreamBlocks streams blocks
func (api *Server) StreamBlocks(in *iotexapi.StreamBlocksRequest, stream iotexapi.APIService_StreamBlocksServer) error {
errChan := make(chan error)
if err := api.chainListener.AddResponder(NewBlockListener(stream, errChan)); err != nil {
return status.Error(codes.Internal, err.Error())
}
for {
select {
case err := <-errChan:
if err != nil {
err = status.Error(codes.Aborted, err.Error())
}
return err
}
}
}
// StreamLogs streams logs that match the filter condition
func (api *Server) StreamLogs(in *iotexapi.StreamLogsRequest, stream iotexapi.APIService_StreamLogsServer) error {
errChan := make(chan error)
// register the log filter so it will match logs in new blocks
if err := api.chainListener.AddResponder(NewLogFilter(in.Filter, stream, errChan)); err != nil {
return status.Error(codes.Internal, err.Error())
}
for {
select {
case err := <-errChan:
if err != nil {
err = status.Error(codes.Aborted, err.Error())
}
return err
}
}
}
// GetElectionBuckets returns the native election buckets.
func (api *Server) GetElectionBuckets(
ctx context.Context,
in *iotexapi.GetElectionBucketsRequest,
) (*iotexapi.GetElectionBucketsResponse, error) {
if api.electionCommittee == nil {
return nil, status.Error(codes.Unavailable, "Native election not supported")
}
buckets, err := api.electionCommittee.NativeBucketsByEpoch(in.GetEpochNum())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
re := make([]*iotextypes.ElectionBucket, len(buckets))
for i, b := range buckets {
startTime, err := ptypes.TimestampProto(b.StartTime())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
re[i] = &iotextypes.ElectionBucket{
Voter: b.Voter(),
Candidate: b.Candidate(),
Amount: b.Amount().Bytes(),
StartTime: startTime,
Duration: ptypes.DurationProto(b.Duration()),
Decay: b.Decay(),
}
}
return &iotexapi.GetElectionBucketsResponse{Buckets: re}, nil
}
// GetReceiptByActionHash returns receipt by action hash
func (api *Server) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) {
if !api.hasActionIndex || api.indexer == nil {
return nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
actIndex, err := api.indexer.GetActionIndex(h[:])
if err != nil {
return nil, err
}
return api.dao.GetReceiptByActionHash(h, actIndex.BlockHeight())
}
// GetActionByActionHash returns action by action hash
func (api *Server) GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) {
if !api.hasActionIndex || api.indexer == nil {
return action.SealedEnvelope{}, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())
}
selp, _, _, err := api.getActionByActionHash(h)
return selp, err
}
// GetEvmTransfersByActionHash returns evm transfers by action hash
func (api *Server) GetEvmTransfersByActionHash(ctx context.Context, in *iotexapi.GetEvmTransfersByActionHashRequest) (*iotexapi.GetEvmTransfersByActionHashResponse, error) {
return nil, status.Error(codes.Unimplemented, "evm transfer index is deprecated, call GetSystemLogByActionHash instead")
}
// GetEvmTransfersByBlockHeight returns evm transfers by block height
func (api *Server) GetEvmTransfersByBlockHeight(ctx context.Context, in *iotexapi.GetEvmTransfersByBlockHeightRequest) (*iotexapi.GetEvmTransfersByBlockHeightResponse, error) {
return nil, status.Error(codes.Unimplemented, "evm transfer index is deprecated, call GetSystemLogByBlockHeight instead")
}
// GetImplicitTransferLogByActionHash returns implicit transfer log by action hash
func (api *Server) GetImplicitTransferLogByActionHash(
ctx context.Context,
in *iotexapi.GetImplicitTransferLogByActionHashRequest) (*iotexapi.GetImplicitTransferLogByActionHashResponse, error) {
if !api.hasActionIndex || api.indexer == nil {
return nil, status.Error(codes.Unimplemented, blockindex.ErrActionIndexNA.Error())
}
if !api.dao.ContainsImplicitTransferLog() {
return nil, status.Error(codes.Unimplemented, blockdao.ErrNotSupported.Error())
}
h, err := hex.DecodeString(in.ActionHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
actIndex, err := api.indexer.GetActionIndex(h)
if err != nil {
if errors.Cause(err) == db.ErrNotExist {
return nil, status.Error(codes.NotFound, err.Error())
}
return nil, status.Error(codes.Internal, err.Error())
}
sysLog, err := api.dao.GetImplicitTransferLog(actIndex.BlockHeight())
if err != nil {
if errors.Cause(err) == db.ErrNotExist {
return nil, status.Error(codes.NotFound, err.Error())
}
return nil, status.Error(codes.Internal, err.Error())
}
for _, log := range sysLog.ImplicitTransferLog {
if bytes.Equal(h, log.ActionHash) {
return &iotexapi.GetImplicitTransferLogByActionHashResponse{
ImplicitTransferLog: log,
}, nil
}
}
return nil, status.Errorf(codes.NotFound, "implicit transfer log not found for action %s", in.ActionHash)
}
// GetImplicitTransferLogByBlockHeight returns implicit transfer log by block height
func (api *Server) GetImplicitTransferLogByBlockHeight(
ctx context.Context,
in *iotexapi.GetImplicitTransferLogByBlockHeightRequest) (*iotexapi.GetImplicitTransferLogByBlockHeightResponse, error) {
if !api.dao.ContainsImplicitTransferLog() {
return nil, status.Error(codes.Unimplemented, blockdao.ErrNotSupported.Error())
}
tip, err := api.dao.Height()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if in.BlockHeight < 1 || in.BlockHeight > tip {
return nil, status.Errorf(codes.InvalidArgument, "invalid block height = %d", in.BlockHeight)
}
h, err := api.dao.GetBlockHash(in.BlockHeight)
if err != nil {
if errors.Cause(err) == db.ErrNotExist {
return nil, status.Error(codes.NotFound, err.Error())
}
return nil, status.Error(codes.Internal, err.Error())
}
sysLog, err := api.dao.GetImplicitTransferLog(in.BlockHeight)
if err != nil {
if errors.Cause(err) == db.ErrNotExist {
return nil, status.Error(codes.NotFound, err.Error())
}
return nil, status.Error(codes.Internal, err.Error())
}
return &iotexapi.GetImplicitTransferLogByBlockHeightResponse{
BlockImplicitTransferLog: sysLog,
BlockIdentifier: &iotextypes.BlockIdentifier{
Hash: hex.EncodeToString(h[:]),
Height: in.BlockHeight,
},
}, nil
}
// Start starts the API server
func (api *Server) Start() error {
portStr := ":" + strconv.Itoa(api.cfg.API.Port)
lis, err := net.Listen("tcp", portStr)
if err != nil {
log.L().Error("API server failed to listen.", zap.Error(err))
return errors.Wrap(err, "API server failed to listen")
}
log.L().Info("API server is listening.", zap.String("addr", lis.Addr().String()))
go func() {
if err := api.grpcServer.Serve(lis); err != nil {
log.L().Fatal("Node failed to serve.", zap.Error(err))
}
}()
if err := api.bc.AddSubscriber(api.chainListener); err != nil {
return errors.Wrap(err, "failed to subscribe to block creations")
}
if err := api.chainListener.Start(); err != nil {
return errors.Wrap(err, "failed to start blockchain listener")
}
return nil
}
// Stop stops the API server
func (api *Server) Stop() error {
api.grpcServer.Stop()
if err := api.bc.RemoveSubscriber(api.chainListener); err != nil {
return errors.Wrap(err, "failed to unsubscribe blockchain listener")
}
return api.chainListener.Stop()
}
func (api *Server) readState(ctx context.Context, p protocol.Protocol, height string, methodName []byte, arguments ...[]byte) ([]byte, error) {
// TODO: need to complete the context
tipHeight := api.bc.TipHeight()
ctx = protocol.WithBlockCtx(ctx, protocol.BlockCtx{
BlockHeight: tipHeight,
})
ctx = protocol.WithBlockchainCtx(
protocol.WithRegistry(ctx, api.registry),
protocol.BlockchainCtx{
Genesis: api.cfg.Genesis,
},
)
rp := rolldpos.FindProtocol(api.registry)
if rp == nil {
return nil, errors.New("rolldpos is not registered")
}
tipEpochNum := rp.GetEpochNum(tipHeight)
if height != "" {
inputHeight, err := strconv.ParseUint(height, 0, 64)
if err != nil {
return nil, err
}
inputEpochNum := rp.GetEpochNum(inputHeight)
if inputEpochNum < tipEpochNum {
// old data, wrap to history state reader
return p.ReadState(ctx, factory.NewHistoryStateReader(api.sf, rp.GetEpochHeight(inputEpochNum)), methodName, arguments...)
}
}
// TODO: need to distinguish user error and system error
return p.ReadState(ctx, api.sf, methodName, arguments...)
}
func (api *Server) getActionsFromIndex(totalActions, start, count uint64) (*iotexapi.GetActionsResponse, error) {
var actionInfo []*iotexapi.ActionInfo
hashes, err := api.indexer.GetActionHashFromIndex(start, count)
if err != nil {
return nil, status.Error(codes.Unavailable, err.Error())
}
for i := range hashes {
act, err := api.getAction(hash.BytesToHash256(hashes[i]), false)
if err != nil {
return nil, status.Error(codes.Unavailable, err.Error())
}
actionInfo = append(actionInfo, act)
}
return &iotexapi.GetActionsResponse{
Total: totalActions,
ActionInfo: actionInfo,
}, nil
}
// getActions returns actions within the requested range
func (api *Server) getActions(start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
totalActions, err := api.indexer.GetTotalActions()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if start >= totalActions {
return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
}
if totalActions == uint64(0) || count == 0 {
return &iotexapi.GetActionsResponse{}, nil
}
if start+count > totalActions {
count = totalActions - start
}
if api.hasActionIndex {
return api.getActionsFromIndex(totalActions, start, count)
}
// Finding actions in reverse order saves time for querying most recent actions
reverseStart := totalActions - (start + count)
if totalActions < start+count {
reverseStart = uint64(0)
count = totalActions - start
}
var res []*iotexapi.ActionInfo
var hit bool
for height := api.bc.TipHeight(); height >= 1 && count > 0; height-- {
blk, err := api.dao.GetBlockByHeight(height)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
if !hit && reverseStart >= uint64(len(blk.Actions)) {
reverseStart -= uint64(len(blk.Actions))
continue
}
// now reverseStart < len(blk.Actions), we are going to fetch actions from this block
hit = true
act := api.reverseActionsInBlock(blk, reverseStart, count)
res = append(act, res...)
count -= uint64(len(act))
reverseStart = 0
}
return &iotexapi.GetActionsResponse{
Total: totalActions,
ActionInfo: res,
}, nil
}
// getSingleAction returns action by action hash
func (api *Server) getSingleAction(actionHash string, checkPending bool) (*iotexapi.GetActionsResponse, error) {
actHash, err := hash.HexStringToHash256(actionHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
act, err := api.getAction(actHash, checkPending)
if err != nil {
return nil, status.Error(codes.Unavailable, err.Error())
}
return &iotexapi.GetActionsResponse{
Total: 1,
ActionInfo: []*iotexapi.ActionInfo{act},
}, nil
}
// getActionsByAddress returns all actions associated with an address
func (api *Server) getActionsByAddress(addrStr string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
actions, err := api.indexer.GetActionsByAddress(hash.BytesToHash160(addr.Bytes()), start, count)
if err != nil && (errors.Cause(err) == db.ErrBucketNotExist || errors.Cause(err) == db.ErrNotExist) {
// no actions associated with address, return nil
return &iotexapi.GetActionsResponse{}, nil
}
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
res := &iotexapi.GetActionsResponse{Total: uint64(len(actions))}
for i := range actions {
act, err := api.getAction(hash.BytesToHash256(actions[i]), false)
if err != nil {
continue
}
res.ActionInfo = append(res.ActionInfo, act)
}
return res, nil
}
// getBlockHashByActionHash returns block hash by action hash
func (api *Server) getBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) {
actIndex, err := api.indexer.GetActionIndex(h[:])
if err != nil {
return hash.ZeroHash256, err
}
return api.dao.GetBlockHash(actIndex.BlockHeight())
}
// getActionByActionHash returns action by action hash
func (api *Server) getActionByActionHash(h hash.Hash256) (action.SealedEnvelope, hash.Hash256, uint64, error) {
actIndex, err := api.indexer.GetActionIndex(h[:])
if err != nil {
return action.SealedEnvelope{}, hash.ZeroHash256, 0, err
}
blk, err := api.dao.GetBlockByHeight(actIndex.BlockHeight())
if err != nil {
return action.SealedEnvelope{}, hash.ZeroHash256, 0, err
}
selp, err := api.dao.GetActionByActionHash(h, actIndex.BlockHeight())
return selp, blk.HashBlock(), actIndex.BlockHeight(), err
}
// getUnconfirmedActionsByAddress returns all unconfirmed actions in actpool associated with an address
func (api *Server) getUnconfirmedActionsByAddress(address string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
selps := api.ap.GetUnconfirmedActs(address)
if len(selps) == 0 {
return &iotexapi.GetActionsResponse{}, nil
}
if start >= uint64(len(selps)) {
return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
}
var res []*iotexapi.ActionInfo
for i := start; i < uint64(len(selps)) && i < start+count; i++ {
act, err := api.pendingAction(selps[i])
if err != nil {
continue
}
res = append(res, act)
}
return &iotexapi.GetActionsResponse{
Total: uint64(len(selps)),
ActionInfo: res,
}, nil
}
// getActionsByBlock returns all actions in a block
func (api *Server) getActionsByBlock(blkHash string, start uint64, count uint64) (*iotexapi.GetActionsResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
hash, err := hash.HexStringToHash256(blkHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
blk, err := api.dao.GetBlock(hash)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
if len(blk.Actions) == 0 {
return &iotexapi.GetActionsResponse{}, nil
}
if start >= uint64(len(blk.Actions)) {
return nil, status.Error(codes.InvalidArgument, "start exceeds the limit")
}
res := api.actionsInBlock(blk, start, count)
return &iotexapi.GetActionsResponse{
Total: uint64(len(blk.Actions)),
ActionInfo: res,
}, nil
}
// getBlockMetas returns blockmetas response within the height range
func (api *Server) getBlockMetas(start uint64, count uint64) (*iotexapi.GetBlockMetasResponse, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
if count > api.cfg.API.RangeQueryLimit {
return nil, status.Error(codes.InvalidArgument, "range exceeds the limit")
}
tipHeight := api.bc.TipHeight()
if start > tipHeight {
return nil, status.Error(codes.InvalidArgument, "start height should not exceed tip height")
}
var res []*iotextypes.BlockMeta
for height := start; height <= tipHeight && count > 0; height++ {
blockMeta, err := api.getBlockMetasByHeader(height)
if errors.Cause(err) == db.ErrNotExist {
blockMeta, err = api.getBlockMetasByBlock(height)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
res = append(res, blockMeta)
count--
}
return &iotexapi.GetBlockMetasResponse{
Total: tipHeight,
BlkMetas: res,
}, nil
}
// getBlockMeta returns blockmetas response by block hash
func (api *Server) getBlockMeta(blkHash string) (*iotexapi.GetBlockMetasResponse, error) {
hash, err := hash.HexStringToHash256(blkHash)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
blockMeta, err := api.getBlockMetaByHeader(hash)
if errors.Cause(err) == db.ErrNotExist {
blockMeta, err = api.getBlockMetaByBlock(hash)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
return &iotexapi.GetBlockMetasResponse{
Total: 1,
BlkMetas: []*iotextypes.BlockMeta{blockMeta},
}, nil
}
// putBlockMetaUpgradeByBlock puts numActions and transferAmount for blockmeta by block
func (api *Server) putBlockMetaUpgradeByBlock(blk *block.Block, blockMeta *iotextypes.BlockMeta) *iotextypes.BlockMeta {
blockMeta.NumActions = int64(len(blk.Actions))
blockMeta.TransferAmount = blk.CalculateTransferAmount().String()
return blockMeta
}
// putBlockMetaUpgradeByHeader puts numActions and transferAmount for blockmeta by header height
func (api *Server) putBlockMetaUpgradeByHeader(height uint64, blockMeta *iotextypes.BlockMeta) (*iotextypes.BlockMeta, error) {
index, err := api.indexer.GetBlockIndex(height)
if err != nil {
return nil, errors.Wrapf(err, "missing block index at height %d", height)
}
blockMeta.NumActions = int64(index.NumAction())
blockMeta.TransferAmount = index.TsfAmount().String()
return blockMeta, nil
}
// getBlockMetasByHeader gets block header by height
func (api *Server) getBlockMetasByHeader(height uint64) (*iotextypes.BlockMeta, error) {
header, err := api.bc.BlockHeaderByHeight(height)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(header)
blockMeta, err = api.putBlockMetaUpgradeByHeader(header.Height(), blockMeta)
if err != nil {
return nil, err
}
return blockMeta, nil
}
// getBlockMetasByBlock gets block by height
func (api *Server) getBlockMetasByBlock(height uint64) (*iotextypes.BlockMeta, error) {
blk, err := api.dao.GetBlockByHeight(height)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(blk)
blockMeta = api.putBlockMetaUpgradeByBlock(blk, blockMeta)
return blockMeta, nil
}
// getBlockMetaByHeader gets block header by hash
func (api *Server) getBlockMetaByHeader(h hash.Hash256) (*iotextypes.BlockMeta, error) {
header, err := api.dao.Header(h)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(header)
blockMeta, err = api.putBlockMetaUpgradeByHeader(header.Height(), blockMeta)
if err != nil {
return nil, err
}
return blockMeta, nil
}
// getBlockMetaByBlock gets block by hash
func (api *Server) getBlockMetaByBlock(h hash.Hash256) (*iotextypes.BlockMeta, error) {
blk, err := api.dao.GetBlock(h)
if err != nil {
return nil, status.Error(codes.NotFound, err.Error())
}
blockMeta := api.getCommonBlockMeta(blk)
blockMeta = api.putBlockMetaUpgradeByBlock(blk, blockMeta)
return blockMeta, nil
}
// getCommonBlockMeta builds a BlockMeta from either a *block.Header or a *block.Block
func (api *Server) getCommonBlockMeta(common interface{}) *iotextypes.BlockMeta {
header, ok := common.(*block.Header)
if !ok {
blk := common.(*block.Block)
header = &blk.Header
}
hash := header.HashBlock()
height := header.Height()
ts, _ := ptypes.TimestampProto(header.Timestamp())
producerAddress := header.ProducerAddress()
txRoot := header.TxRoot()
receiptRoot := header.ReceiptRoot()
deltaStateDigest := header.DeltaStateDigest()
logsBloom := header.LogsBloomfilter()
blockMeta := &iotextypes.BlockMeta{
Hash: hex.EncodeToString(hash[:]),
Height: height,
Timestamp: ts,
ProducerAddress: producerAddress,
TxRoot: hex.EncodeToString(txRoot[:]),
ReceiptRoot: hex.EncodeToString(receiptRoot[:]),
DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]),
}
if logsBloom != nil {
blockMeta.LogsBloom = hex.EncodeToString(logsBloom.Bytes())
}
return blockMeta
}
func (api *Server) getGravityChainStartHeight(epochHeight uint64) (uint64, error) {
gravityChainStartHeight := epochHeight
if pp := poll.FindProtocol(api.registry); pp != nil {
methodName := []byte("GetGravityChainStartHeight")
arguments := [][]byte{[]byte(strconv.FormatUint(epochHeight, 10))}
data, err := api.readState(context.Background(), pp, "", methodName, arguments...)
if err != nil {
return 0, err
}
if len(data) == 0 {
return 0, nil
}
if gravityChainStartHeight, err = strconv.ParseUint(string(data), 10, 64); err != nil {
return 0, err
}
}
return gravityChainStartHeight, nil
}
func (api *Server) committedAction(selp action.SealedEnvelope, blkHash hash.Hash256, blkHeight uint64) (
*iotexapi.ActionInfo, error) {
actHash := selp.Hash()
header, err := api.dao.Header(blkHash)
if err != nil {
return nil, err
}
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
receipt, err := api.dao.GetReceiptByActionHash(actHash, blkHeight)
if err != nil {
return nil, err
}
gas := new(big.Int)
gas = gas.Mul(selp.GasPrice(), big.NewInt(int64(receipt.GasConsumed)))
return &iotexapi.ActionInfo{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: hex.EncodeToString(blkHash[:]),
BlkHeight: header.Height(),
Sender: sender.String(),
GasFee: gas.String(),
Timestamp: header.BlockHeaderCoreProto().Timestamp,
}, nil
}
func (api *Server) pendingAction(selp action.SealedEnvelope) (*iotexapi.ActionInfo, error) {
actHash := selp.Hash()
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
return &iotexapi.ActionInfo{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: hex.EncodeToString(hash.ZeroHash256[:]),
BlkHeight: 0,
Sender: sender.String(),
Timestamp: nil,
}, nil
}
func (api *Server) getAction(actHash hash.Hash256, checkPending bool) (*iotexapi.ActionInfo, error) {
selp, blkHash, blkHeight, err := api.getActionByActionHash(actHash)
if err == nil {
return api.committedAction(selp, blkHash, blkHeight)
}
// Try to fetch pending action from actpool
if checkPending {
selp, err = api.ap.GetActionByHash(actHash)
}
if err != nil {
return nil, err
}
return api.pendingAction(selp)
}
func (api *Server) actionsInBlock(blk *block.Block, start, count uint64) []*iotexapi.ActionInfo {
h := blk.HashBlock()
blkHash := hex.EncodeToString(h[:])
blkHeight := blk.Height()
ts := blk.Header.BlockHeaderCoreProto().Timestamp
var res []*iotexapi.ActionInfo
for i := start; i < uint64(len(blk.Actions)) && i < start+count; i++ {
selp := blk.Actions[i]
actHash := selp.Hash()
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
res = append(res, &iotexapi.ActionInfo{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: blkHash,
BlkHeight: blkHeight,
Sender: sender.String(),
Timestamp: ts,
})
}
return res
}
func (api *Server) reverseActionsInBlock(blk *block.Block, reverseStart, count uint64) []*iotexapi.ActionInfo {
h := blk.HashBlock()
blkHash := hex.EncodeToString(h[:])
blkHeight := blk.Height()
ts := blk.Header.BlockHeaderCoreProto().Timestamp
var res []*iotexapi.ActionInfo
for i := reverseStart; i < uint64(len(blk.Actions)) && i < reverseStart+count; i++ {
ri := uint64(len(blk.Actions)) - 1 - i
selp := blk.Actions[ri]
actHash := selp.Hash()
sender, _ := address.FromBytes(selp.SrcPubkey().Hash())
res = append([]*iotexapi.ActionInfo{
{
Action: selp.Proto(),
ActHash: hex.EncodeToString(actHash[:]),
BlkHash: blkHash,
BlkHeight: blkHeight,
Sender: sender.String(),
Timestamp: ts,
},
}, res...)
}
return res
}
func (api *Server) getLogsInBlock(filter *LogFilter, start, count uint64) ([]*iotextypes.Log, error) {
if count == 0 {
return nil, status.Error(codes.InvalidArgument, "count must be greater than zero")
}
// filter logs within start --> end
var logs []*iotextypes.Log
end := start + count - 1
if end > api.bc.TipHeight() {
end = api.bc.TipHeight()
}
for i := start; i <= end; i++ {
receipts, err := api.dao.GetReceipts(i)
if err != nil {
return logs, status.Error(codes.InvalidArgument, err.Error())
}
logs = append(logs, filter.MatchLogs(receipts)...)
}
return logs, nil
}
// TODO: Since GasConsumed on the receipt may not be enough for the gas limit, we use binary search for the gas estimate. Need a better way to address it later.
func (api *Server) estimateActionGasConsumptionForExecution(exec *iotextypes.Execution, sender string) (*iotexapi.EstimateActionGasConsumptionResponse, error) {
sc := &action.Execution{}
if err := sc.LoadProto(exec); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
state, err := accountutil.AccountState(api.sf, sender)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
nonce := state.Nonce + 1
callerAddr, err := address.FromString(sender)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
sc, _ = action.NewExecution(
sc.Contract(),
nonce,
sc.Amount(),
api.cfg.Genesis.BlockGasLimit,
big.NewInt(0),
sc.Data(),
)
ctx, err := api.bc.Context()
if err != nil {
return nil, err
}
_, receipt, err := api.sf.SimulateExecution(ctx, callerAddr, sc, api.dao.GetBlockHash)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if receipt.Status != uint64(iotextypes.ReceiptStatus_Success) {
return nil, status.Error(codes.Internal, "execution simulation gets failure status")
}
estimatedGas := receipt.GasConsumed
enough, err := api.isGasLimitEnough(callerAddr, sc, nonce, estimatedGas)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !enough {
low, high := estimatedGas, api.cfg.Genesis.BlockGasLimit
estimatedGas = high
for low <= high {
mid := (low + high) / 2
enough, err = api.isGasLimitEnough(callerAddr, sc, nonce, mid)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if enough {
estimatedGas = mid
break
}
low = mid + 1
}
}
return &iotexapi.EstimateActionGasConsumptionResponse{
Gas: estimatedGas,
}, nil
}
func (api *Server) estimateActionGasConsumptionForTransfer(transfer *iotextypes.Transfer) (*iotexapi.EstimateActionGasConsumptionResponse, error) {
payloadSize := uint64(len(transfer.Payload))
return &iotexapi.EstimateActionGasConsumptionResponse{
Gas: payloadSize*action.TransferPayloadGas + action.TransferBaseIntrinsicGas,
}, nil
}
func (api *Server) isGasLimitEnough(
caller address.Address,
sc *action.Execution,
nonce uint64,
gasLimit uint64,
) (bool, error) {
sc, _ = action.NewExecution(
sc.Contract(),
nonce,
sc.Amount(),
gasLimit,
big.NewInt(0),
sc.Data(),
)
ctx, err := api.bc.Context()
if err != nil {
return false, err
}
_, receipt, err := api.sf.SimulateExecution(ctx, caller, sc, api.dao.GetBlockHash)
if err != nil {
return false, err
}
return receipt.Status == uint64(iotextypes.ReceiptStatus_Success), nil
}
func (api *Server) getProductivityByEpoch(
rp *rolldpos.Protocol,
epochNum uint64,
tipHeight uint64,
abps state.CandidateList,
) (uint64, map[string]uint64, error) {
num, produce, err := rp.ProductivityByEpoch(epochNum, tipHeight, func(start uint64, end uint64) (map[string]uint64, error) {
return blockchain.Productivity(api.bc, start, end)
})
if err != nil {
return 0, nil, status.Error(codes.NotFound, err.Error())
}
// check if there is any active block producer who didn't produce any block
for _, abp := range abps {
if _, ok := produce[abp.Address]; !ok {
produce[abp.Address] = 0
}
}
return num, produce, nil
}
| 1 | 22,089 | also fetch block hash of this height and return hash | iotexproject-iotex-core | go |
@@ -75,6 +75,9 @@ namespace Samples.HttpMessageHandler
private static async Task SendHttpClientRequestAsync(bool tracingDisabled)
{
+ // Insert a call to the Tracer.Instance to include an AssemblyRef to Datadog.Trace assembly in the final executable
+ var ins = Tracer.Instance;
+
Console.WriteLine($"[HttpClient] sending request to {Url}");
var clientRequestContent = new StringContent(RequestContent, Utf8);
| 1 | using System;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace;
namespace Samples.HttpMessageHandler
{
public static class Program
{
private const string RequestContent = "PING";
private const string ResponseContent = "PONG";
private static readonly Encoding Utf8 = Encoding.UTF8;
private static string Url;
public static void Main(string[] args)
{
bool tracingDisabled = args.Any(arg => arg.Equals("TracingDisabled", StringComparison.OrdinalIgnoreCase));
Console.WriteLine($"TracingDisabled {tracingDisabled}");
bool useHttpClient = args.Any(arg => arg.Equals("HttpClient", StringComparison.OrdinalIgnoreCase));
Console.WriteLine($"HttpClient {useHttpClient}");
bool useWebClient = args.Any(arg => arg.Equals("WebClient", StringComparison.OrdinalIgnoreCase));
Console.WriteLine($"WebClient {useWebClient}");
string port = args.FirstOrDefault(arg => arg.StartsWith("Port="))?.Split('=')[1] ?? "9000";
Console.WriteLine($"Port {port}");
Url = $"http://localhost:{port}/Samples.HttpMessageHandler/";
Console.WriteLine();
Console.WriteLine($"Starting HTTP listener at {Url}");
using (var listener = new HttpListener())
{
listener.Prefixes.Add(Url);
listener.Start();
// handle http requests in a background thread
var listenerThread = new Thread(HandleHttpRequests);
listenerThread.Start(listener);
if (args.Length == 0 || args.Any(arg => arg.Equals("HttpClient", StringComparison.OrdinalIgnoreCase)))
{
// send an http request using HttpClient
Console.WriteLine();
Console.WriteLine("Sending request with HttpClient.");
SendHttpClientRequestAsync(tracingDisabled).GetAwaiter().GetResult();
}
if (args.Length == 0 || args.Any(arg => arg.Equals("WebClient", StringComparison.OrdinalIgnoreCase)))
{
// send an http request using WebClient
Console.WriteLine();
Console.WriteLine("Sending request with WebClient.");
SendWebClientRequest(tracingDisabled);
}
Console.WriteLine();
Console.WriteLine("Stopping HTTP listener.");
listener.Stop();
}
// Force process to end, otherwise the background listener thread lives forever in .NET Core.
// Apparently listener.GetContext() doesn't throw an exception if listener.Stop() is called,
// like it does in .NET Framework.
Environment.Exit(0);
}
private static async Task SendHttpClientRequestAsync(bool tracingDisabled)
{
Console.WriteLine($"[HttpClient] sending request to {Url}");
var clientRequestContent = new StringContent(RequestContent, Utf8);
using (var client = new HttpClient())
{
if (tracingDisabled)
{
client.DefaultRequestHeaders.Add(HttpHeaderNames.TracingEnabled, "false");
}
using (var responseMessage = await client.PostAsync(Url, clientRequestContent))
{
// read response content and headers
var responseContent = await responseMessage.Content.ReadAsStringAsync();
Console.WriteLine($"[HttpClient] response content: {responseContent}");
foreach (var header in responseMessage.Headers)
{
var name = header.Key;
var values = string.Join(",", header.Value);
Console.WriteLine($"[HttpClient] response header: {name}={values}");
}
}
}
}
private static void SendWebClientRequest(bool tracingDisabled)
{
Console.WriteLine($"[WebClient] sending request to {Url}");
using (var webClient = new WebClient())
{
webClient.Encoding = Utf8;
if (tracingDisabled)
{
webClient.Headers.Add(HttpHeaderNames.TracingEnabled, "false");
}
var responseContent = webClient.DownloadString(Url);
Console.WriteLine($"[WebClient] response content: {responseContent}");
foreach (string headerName in webClient.ResponseHeaders)
{
string headerValue = webClient.ResponseHeaders[headerName];
Console.WriteLine($"[WebClient] response header: {headerName}={headerValue}");
}
}
}
private static void HandleHttpRequests(object state)
{
var listener = (HttpListener)state;
while (listener.IsListening)
{
try
{
var context = listener.GetContext();
Console.WriteLine("[HttpListener] received request");
// read request content and headers
using (var reader = new StreamReader(context.Request.InputStream, context.Request.ContentEncoding))
{
string requestContent = reader.ReadToEnd();
Console.WriteLine($"[HttpListener] request content: {requestContent}");
foreach (string headerName in context.Request.Headers)
{
string headerValue = context.Request.Headers[headerName];
Console.WriteLine($"[HttpListener] request header: {headerName}={headerValue}");
}
}
// write response content
byte[] responseBytes = Utf8.GetBytes(ResponseContent);
context.Response.ContentEncoding = Utf8;
context.Response.ContentLength64 = responseBytes.Length;
context.Response.OutputStream.Write(responseBytes, 0, responseBytes.Length);
// we must close the response
context.Response.Close();
}
catch (HttpListenerException)
{
// listener was stopped,
// ignore to let the loop end and the method return
}
}
}
}
}
| 1 | 15,636 | Why do we need this in this sample app and not the others? | DataDog-dd-trace-dotnet | .cs |
@@ -25,6 +25,9 @@ public interface Span extends AutoCloseable, TraceContext {
Span setAttribute(String key, Number value);
Span setAttribute(String key, String value);
+ Span addEvent(String name);
+ Span addEvent(String name, long timestamp);
+
Span setStatus(Status status);
@Override | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.tracing;
public interface Span extends AutoCloseable, TraceContext {
Span setName(String name);
Span setAttribute(String key, boolean value);
Span setAttribute(String key, Number value);
Span setAttribute(String key, String value);
Span setStatus(Status status);
@Override
void close();
enum Kind {
CLIENT("client"),
SERVER("server"),
PRODUCER("producer"),
CONSUMER("consumer"),
;
// The nice name is the name expected in an OT trace.
private final String niceName;
private Kind(String niceName) {
this.niceName = niceName;
}
@Override
public String toString() {
return niceName;
}
}
}
| 1 | 17,759 | We don't need this additional method. | SeleniumHQ-selenium | py |
@@ -97,6 +97,6 @@ class CreateUserCommand extends Command
$output->writeln(sprintf('<error>Can\'t find role %s</error>', $role));
}
- return 1;
+ return 0;
}
} | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types = 1);
namespace Ergonode\Account\Application\Command;
use Ergonode\Account\Domain\Query\RoleQueryInterface;
use Ergonode\Account\Domain\ValueObject\Password;
use Ergonode\Core\Domain\ValueObject\Language;
use Ergonode\EventSourcing\Infrastructure\Bus\CommandBusInterface;
use Ergonode\SharedKernel\Domain\Aggregate\RoleId;
use Ergonode\SharedKernel\Domain\ValueObject\Email;
use Symfony\Component\Console\Command\Command;
use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Output\OutputInterface;
/**
*/
class CreateUserCommand extends Command
{
private const NAME = 'ergonode:user:create';
/**
* @var CommandBusInterface
*/
private CommandBusInterface $commandBus;
/**
* @var RoleQueryInterface
*/
private RoleQueryInterface $query;
/**
* @param CommandBusInterface $commandBus
* @param RoleQueryInterface $query
*/
public function __construct(CommandBusInterface $commandBus, RoleQueryInterface $query)
{
parent::__construct(static::NAME);
$this->query = $query;
$this->commandBus = $commandBus;
}
/**
* Command configuration
*/
public function configure(): void
{
$this->setDescription('Creates a new valid user');
$this->addArgument('email', InputArgument::REQUIRED, 'user email.');
$this->addArgument('first_name', InputArgument::REQUIRED, 'First name');
$this->addArgument('last_name', InputArgument::REQUIRED, 'Last name');
$this->addArgument('password', InputArgument::REQUIRED, 'Password');
$this->addArgument('language', InputArgument::REQUIRED, 'Language');
$this->addArgument('role', InputArgument::OPTIONAL, 'Role', 'Admin');
}
/**
* @param InputInterface $input
* @param OutputInterface $output
*
* @return int
*
* @throws \Exception
*/
public function execute(InputInterface $input, OutputInterface $output): int
{
$firstName = $input->getArgument('first_name');
$lastName = $input->getArgument('last_name');
$role = $input->getArgument('role');
$email = new Email($input->getArgument('email'));
$password = new Password($input->getArgument('password'));
$language = new Language($input->getArgument('language'));
$roleId = array_search($role, $this->query->getDictionary(), true);
if ($roleId) {
$command = new \Ergonode\Account\Domain\Command\User\CreateUserCommand(
$firstName,
$lastName,
$email,
$language,
$password,
new RoleId($roleId)
);
$this->commandBus->dispatch($command);
$output->writeln('<info>User created.</info>');
} else {
$output->writeln(sprintf('<error>Can\'t find role %s</error>', $role));
}
return 1;
}
}
| 1 | 8,969 | Shouldn't this return code conditionally? The above line seems like an error occurred. | ergonode-backend | php |
@@ -24,6 +24,12 @@ import (
"k8s.io/apimachinery/pkg/types"
)
+// SupportedDiskType is a map containing the valid disk type
+var SupportedDiskType = map[string]bool{
+ string(apis.TypeSparseCPV): true,
+ string(apis.TypeDiskCPV): true,
+}
+
// SPC encapsulates StoragePoolClaim api object.
type SPC struct {
// actual spc object | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"time"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"k8s.io/apimachinery/pkg/types"
)
// SPC encapsulates StoragePoolClaim api object.
type SPC struct {
// actual spc object
Object *apis.StoragePoolClaim
}
// SPCList holds the list of StoragePoolClaim api
type SPCList struct {
// list of storagepoolclaims
ObjectList *apis.StoragePoolClaimList
}
// Builder is the builder object for SPC.
type Builder struct {
Spc *SPC
}
// ListBuilder is the builder object for SPCList.
type ListBuilder struct {
SpcList *SPCList
}
// Predicate defines an abstraction to determine conditional checks against the provided spc instance.
type Predicate func(*SPC) bool
type predicateList []Predicate
// all returns true if all the predicates succeed against the provided spc instance.
func (l predicateList) all(c *SPC) bool {
for _, pred := range l {
if !pred(c) {
return false
}
}
return true
}
// HasAnnotation returns true if provided annotation key and value are present in the provided spc instance.
func HasAnnotation(key, value string) Predicate {
return func(c *SPC) bool {
val, ok := c.Object.GetAnnotations()[key]
if ok {
return val == value
}
return false
}
}
// Filter will filter the spc instances if all the predicates succeed against that spc.
func (l *SPCList) Filter(p ...Predicate) *SPCList {
var plist predicateList
plist = append(plist, p...)
if len(plist) == 0 {
return l
}
filtered := NewListBuilder().List()
for _, spcAPI := range l.ObjectList.Items {
spcAPI := spcAPI // pin it
SPC := BuilderForAPIObject(&spcAPI).Spc
if plist.all(SPC) {
filtered.ObjectList.Items = append(filtered.ObjectList.Items, *SPC.Object)
}
}
return filtered
}
// NewBuilder returns an empty instance of the Builder object.
func NewBuilder() *Builder {
return &Builder{
Spc: &SPC{&apis.StoragePoolClaim{}},
}
}
// BuilderForObject returns an instance of the Builder object based on spc object
func BuilderForObject(SPC *SPC) *Builder {
return &Builder{
Spc: SPC,
}
}
// BuilderForAPIObject returns an instance of the Builder object based on spc api object.
func BuilderForAPIObject(spc *apis.StoragePoolClaim) *Builder {
return &Builder{
Spc: &SPC{spc},
}
}
// WithName sets the Name field of spc with provided argument value.
func (sb *Builder) WithName(name string) *Builder {
sb.Spc.Object.Name = name
sb.Spc.Object.Spec.Name = name
return sb
}
// WithGenerateName appends a unique timestamp-based suffix to the name
func (sb *Builder) WithGenerateName(name string) *Builder {
name = name + "-" + fmt.Sprintf("%d", time.Now().UnixNano())
return sb.WithName(name)
}
// WithDiskType sets the Type field of spc with provided argument value.
func (sb *Builder) WithDiskType(diskType string) *Builder {
sb.Spc.Object.Spec.Type = diskType
return sb
}
// WithPoolType sets the poolType field of spc with provided argument value.
func (sb *Builder) WithPoolType(poolType string) *Builder {
sb.Spc.Object.Spec.PoolSpec.PoolType = poolType
return sb
}
// WithOverProvisioning sets the OverProvisioning field of spc with provided argument value.
func (sb *Builder) WithOverProvisioning(val bool) *Builder {
sb.Spc.Object.Spec.PoolSpec.OverProvisioning = val
return sb
}
// WithPool sets the poolType field of spc with provided argument value.
func (sb *Builder) WithPool(poolType string) *Builder {
sb.Spc.Object.Spec.PoolSpec.PoolType = poolType
return sb
}
// WithMaxPool sets the maxpool field of spc with provided argument value.
func (sb *Builder) WithMaxPool(val int) *Builder {
maxPool := newInt(val)
sb.Spc.Object.Spec.MaxPools = maxPool
return sb
}
// newInt returns a pointer to the int value.
func newInt(val int) *int {
newVal := val
return &newVal
}
// Build returns the SPC object built by this builder.
func (sb *Builder) Build() *SPC {
return sb.Spc
}
// NewListBuilder returns a new instance of ListBuilder object.
func NewListBuilder() *ListBuilder {
return &ListBuilder{SpcList: &SPCList{ObjectList: &apis.StoragePoolClaimList{}}}
}
// WithUIDs builds a list of StoragePoolClaims based on the provided pool UIDs
func (b *ListBuilder) WithUIDs(poolUIDs ...string) *ListBuilder {
for _, uid := range poolUIDs {
obj := &SPC{&apis.StoragePoolClaim{}}
obj.Object.SetUID(types.UID(uid))
b.SpcList.ObjectList.Items = append(b.SpcList.ObjectList.Items, *obj.Object)
}
return b
}
// WithList builds the list based on the provided *SPCList instances.
func (b *ListBuilder) WithList(pools *SPCList) *ListBuilder {
if pools == nil {
return b
}
b.SpcList.ObjectList.Items = append(b.SpcList.ObjectList.Items, pools.ObjectList.Items...)
return b
}
// WithAPIList builds the list based on the provided *apis.StoragePoolClaimList.
func (b *ListBuilder) WithAPIList(pools *apis.StoragePoolClaimList) *ListBuilder {
if pools == nil {
return b
}
for _, pool := range pools.Items {
pool := pool //pin it
b.SpcList.ObjectList.Items = append(b.SpcList.ObjectList.Items, pool)
}
return b
}
// List returns the list of spc instances that were built by this builder.
func (b *ListBuilder) List() *SPCList {
return b.SpcList
}
// Len returns the length og SPCList.
func (l *SPCList) Len() int {
return len(l.ObjectList.Items)
}
// IsEmpty returns false if the SPCList is empty.
func (l *SPCList) IsEmpty() bool {
return len(l.ObjectList.Items) == 0
}
// GetPoolUIDs returns the UIDs of the pools available in the list.
func (l *SPCList) GetPoolUIDs() []string {
uids := []string{}
for _, pool := range l.ObjectList.Items {
uids = append(uids, string(pool.GetUID()))
}
return uids
}
| 1 | 16,202 | What is the need to use string as key? Can we use the apis type as the key? | openebs-maya | go |
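The review comment above asks whether the SupportedDiskType map could be keyed by the apis type instead of converting each constant to string. Below is a minimal sketch under the assumption that apis.TypeSparseCPV and apis.TypeDiskCPV share a named type in the apis package; the name apis.CasPoolValString is an assumption used for illustration, not confirmed by this file.
// Sketch only: key the map by the (assumed) named apis type rather than string,
// so the string() conversions at the definition site go away. A lookup from a
// plain string field would then convert once at the call site instead, e.g.
// SupportedDiskType[apis.CasPoolValString(spc.Spec.Type)].
var SupportedDiskType = map[apis.CasPoolValString]bool{
	apis.TypeSparseCPV: true,
	apis.TypeDiskCPV:   true,
}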
@@ -867,6 +867,7 @@ class Uppy {
info (message, type, duration) {
const isComplexMessage = typeof message === 'object'
+ duration = typeof duration === 'undefined' ? 3000 : duration
this.setState({
info: { | 1 | const Utils = require('../core/Utils')
const Translator = require('../core/Translator')
const UppySocket = require('./UppySocket')
const ee = require('namespace-emitter')
const cuid = require('cuid')
const throttle = require('lodash.throttle')
const prettyBytes = require('prettier-bytes')
const match = require('mime-match')
// const deepFreeze = require('deep-freeze-strict')
/**
* Main Uppy core
*
* @param {object} opts general options, such as the locale and whether to show the modal
*/
class Uppy {
constructor (opts) {
const defaultLocale = {
strings: {
youCanOnlyUploadX: {
0: 'You can only upload %{smart_count} file',
1: 'You can only upload %{smart_count} files'
},
youHaveToAtLeastSelectX: {
0: 'You have to select at least %{smart_count} file',
1: 'You have to select at least %{smart_count} files'
},
exceedsSize: 'This file exceeds maximum allowed size of',
youCanOnlyUploadFileTypes: 'You can only upload:',
uppyServerError: 'Connection with Uppy Server failed'
}
}
// set default options
const defaultOptions = {
id: 'uppy',
autoProceed: true,
debug: false,
restrictions: {
maxFileSize: false,
maxNumberOfFiles: false,
minNumberOfFiles: false,
allowedFileTypes: false
},
meta: {},
onBeforeFileAdded: (currentFile, files) => Promise.resolve(),
onBeforeUpload: (files, done) => Promise.resolve(),
locale: defaultLocale
}
// Merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
// // Dictates in what order different plugin types are ran:
// this.types = [ 'presetter', 'orchestrator', 'progressindicator',
// 'acquirer', 'modifier', 'uploader', 'presenter', 'debugger']
this.locale = Object.assign({}, defaultLocale, this.opts.locale)
this.locale.strings = Object.assign({}, defaultLocale.strings, this.opts.locale.strings)
// i18n
this.translator = new Translator({locale: this.locale})
this.i18n = this.translator.translate.bind(this.translator)
// Container for different types of plugins
this.plugins = {}
this.translator = new Translator({locale: this.opts.locale})
this.i18n = this.translator.translate.bind(this.translator)
this.getState = this.getState.bind(this)
this.getPlugin = this.getPlugin.bind(this)
this.updateMeta = this.updateMeta.bind(this)
this.initSocket = this.initSocket.bind(this)
this.log = this.log.bind(this)
this.info = this.info.bind(this)
this.hideInfo = this.hideInfo.bind(this)
this.addFile = this.addFile.bind(this)
this.removeFile = this.removeFile.bind(this)
this.pauseResume = this.pauseResume.bind(this)
this.calculateProgress = this.calculateProgress.bind(this)
this.resetProgress = this.resetProgress.bind(this)
this.pauseAll = this.pauseAll.bind(this)
this.resumeAll = this.resumeAll.bind(this)
this.retryAll = this.retryAll.bind(this)
this.cancelAll = this.cancelAll.bind(this)
this.retryUpload = this.retryUpload.bind(this)
// this.bus = this.emitter = ee()
this.emitter = ee()
this.on = this.emitter.on.bind(this.emitter)
this.off = this.emitter.off.bind(this.emitter)
this.once = this.emitter.once.bind(this.emitter)
this.emit = this.emitter.emit.bind(this.emitter)
this.preProcessors = []
this.uploaders = []
this.postProcessors = []
this.state = {
plugins: {},
files: {},
capabilities: {
resumableUploads: false
},
totalProgress: 0,
meta: Object.assign({}, this.opts.meta),
info: {
isHidden: true,
type: 'info',
message: ''
}
}
// for debugging and testing
// this.updateNum = 0
if (this.opts.debug) {
global.UppyState = this.state
global.uppyLog = ''
// global.UppyAddFile = this.addFile.bind(this)
global._uppy = this
}
}
/**
* Iterate on all plugins and run `update` on them. Called each time state changes
*
*/
updateAll (state) {
this.iteratePlugins(plugin => {
plugin.update(state)
})
}
/**
* Updates state
*
* @param {object} patch
*/
setState (patch) {
const prevState = Object.assign({}, this.state)
const nextState = Object.assign({}, this.state, patch)
this.state = nextState
this.emit('core:state-update', prevState, nextState, patch)
this.updateAll(this.state)
}
/**
* Returns current state
*
*/
getState () {
// use deepFreeze for debugging
// return deepFreeze(this.state)
return this.state
}
resetProgress () {
const defaultProgress = {
percentage: 0,
bytesUploaded: 0,
uploadComplete: false,
uploadStarted: false
}
const files = Object.assign({}, this.state.files)
const updatedFiles = {}
Object.keys(files).forEach(fileID => {
const updatedFile = Object.assign({}, files[fileID])
updatedFile.progress = Object.assign({}, updatedFile.progress, defaultProgress)
updatedFiles[fileID] = updatedFile
})
this.setState({
files: updatedFiles,
totalProgress: 0
})
// TODO Document on the website
this.emit('core:reset-progress')
}
addPreProcessor (fn) {
this.preProcessors.push(fn)
}
removePreProcessor (fn) {
const i = this.preProcessors.indexOf(fn)
if (i !== -1) {
this.preProcessors.splice(i, 1)
}
}
addPostProcessor (fn) {
this.postProcessors.push(fn)
}
removePostProcessor (fn) {
const i = this.postProcessors.indexOf(fn)
if (i !== -1) {
this.postProcessors.splice(i, 1)
}
}
addUploader (fn) {
this.uploaders.push(fn)
}
removeUploader (fn) {
const i = this.uploaders.indexOf(fn)
if (i !== -1) {
this.uploaders.splice(i, 1)
}
}
setMeta (data) {
const newMeta = Object.assign({}, this.state.meta, data)
this.log('Adding metadata:')
this.log(data)
this.setState({meta: newMeta})
}
updateMeta (data, fileID) {
const updatedFiles = Object.assign({}, this.state.files)
if (!updatedFiles[fileID]) {
this.log('Was trying to set metadata for a file that’s not with us anymore: ', fileID)
return
}
const newMeta = Object.assign({}, updatedFiles[fileID].meta, data)
updatedFiles[fileID] = Object.assign({}, updatedFiles[fileID], {
meta: newMeta
})
this.setState({files: updatedFiles})
}
/**
* Check that the minNumberOfFiles restriction is satisfied before uploading
*
* @return {boolean}
* @private
*/
checkMinNumberOfFiles () {
const {minNumberOfFiles} = this.opts.restrictions
if (Object.keys(this.state.files).length < minNumberOfFiles) {
this.info(`${this.i18n('youHaveToAtLeastSelectX', {smart_count: minNumberOfFiles})}`, 'error', 5000)
return false
}
return true
}
/**
* Check if file passes a set of restrictions set in options: maxFileSize,
* maxNumberOfFiles and allowedFileTypes
*
* @param {object} file object to check
* @return {boolean}
* @private
*/
checkRestrictions (file) {
const {maxFileSize, maxNumberOfFiles, allowedFileTypes} = this.opts.restrictions
if (maxNumberOfFiles) {
if (Object.keys(this.state.files).length + 1 > maxNumberOfFiles) {
this.info(`${this.i18n('youCanOnlyUploadX', {smart_count: maxNumberOfFiles})}`, 'error', 5000)
return false
}
}
if (allowedFileTypes) {
const isCorrectFileType = allowedFileTypes.filter(match(file.type.mime)).length > 0
if (!isCorrectFileType) {
const allowedFileTypesString = allowedFileTypes.join(', ')
this.info(`${this.i18n('youCanOnlyUploadFileTypes')} ${allowedFileTypesString}`, 'error', 5000)
return false
}
}
if (maxFileSize) {
if (file.data.size > maxFileSize) {
this.info(`${this.i18n('exceedsSize')} ${prettyBytes(maxFileSize)}`, 'error', 5000)
return false
}
}
return true
}
/**
* Add a new file to `state.files`. This will run `onBeforeFileAdded`,
* try to guess file type in a clever way, check file against restrictions,
* and start an upload if `autoProceed === true`.
*
* @param {object} file object to add
*/
addFile (file) {
// Wrap this in a Promise `.then()` handler so errors will reject the Promise
// instead of throwing.
const beforeFileAdded = Promise.resolve()
.then(() => this.opts.onBeforeFileAdded(file, this.getState().files))
return beforeFileAdded.catch((err) => {
const message = typeof err === 'object' ? err.message : err
this.info(message, 'error', 5000)
return Promise.reject(new Error(`onBeforeFileAdded: ${message}`))
}).then(() => {
return Utils.getFileType(file).then((fileType) => {
const updatedFiles = Object.assign({}, this.state.files)
const fileName = file.name || 'noname'
const fileExtension = Utils.getFileNameAndExtension(fileName)[1]
const isRemote = file.isRemote || false
const fileID = Utils.generateFileID(file)
const fileTypeGeneral = fileType[0]
const fileTypeSpecific = fileType[1]
const newFile = {
source: file.source || '',
id: fileID,
name: fileName,
extension: fileExtension || '',
meta: Object.assign({}, { name: fileName }, this.getState().meta),
type: {
general: fileTypeGeneral,
specific: fileTypeSpecific,
mime: fileType.join('/')
},
data: file.data,
progress: {
percentage: 0,
bytesUploaded: 0,
bytesTotal: file.data.size || 0,
uploadComplete: false,
uploadStarted: false
},
size: file.data.size || 'N/A',
isRemote: isRemote,
remote: file.remote || '',
preview: file.preview
}
const isFileAllowed = this.checkRestrictions(newFile)
if (!isFileAllowed) return Promise.reject(new Error('File not allowed'))
updatedFiles[fileID] = newFile
this.setState({files: updatedFiles})
this.emit('core:file-added', newFile)
this.log(`Added file: ${fileName}, ${fileID}, mime type: ${fileType}`)
if (this.opts.autoProceed && !this.scheduledAutoProceed) {
this.scheduledAutoProceed = setTimeout(() => {
this.scheduledAutoProceed = null
this.upload().catch((err) => {
console.error(err.stack || err.message || err)
})
}, 4)
}
})
})
}
removeFile (fileID) {
const updatedFiles = Object.assign({}, this.getState().files)
const removedFile = updatedFiles[fileID]
delete updatedFiles[fileID]
this.setState({files: updatedFiles})
this.calculateTotalProgress()
this.emit('core:file-removed', fileID)
// Clean up object URLs.
if (removedFile.preview && Utils.isObjectURL(removedFile.preview)) {
URL.revokeObjectURL(removedFile.preview)
}
this.log(`Removed file: ${fileID}`)
}
/**
* Get a file object.
*
* @param {string} fileID The ID of the file object to return.
*/
getFile (fileID) {
return this.getState().files[fileID]
}
/**
* Generate a preview image for the given file, if possible.
*/
generatePreview (file) {
if (Utils.isPreviewSupported(file.type.specific) && !file.isRemote) {
Utils.createThumbnail(file, 200).then((thumbnail) => {
this.setPreviewURL(file.id, thumbnail)
}).catch((err) => {
console.warn(err.stack || err.message)
})
}
}
/**
* Set the preview URL for a file.
*/
setPreviewURL (fileID, preview) {
const { files } = this.state
this.setState({
files: Object.assign({}, files, {
[fileID]: Object.assign({}, files[fileID], {
preview: preview
})
})
})
}
pauseResume (fileID) {
const updatedFiles = Object.assign({}, this.getState().files)
if (updatedFiles[fileID].uploadComplete) return
const wasPaused = updatedFiles[fileID].isPaused || false
const isPaused = !wasPaused
const updatedFile = Object.assign({}, updatedFiles[fileID], {
isPaused: isPaused
})
updatedFiles[fileID] = updatedFile
this.setState({files: updatedFiles})
this.emit('core:upload-pause', fileID, isPaused)
return isPaused
}
pauseAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: true
})
updatedFiles[file] = updatedFile
})
this.setState({files: updatedFiles})
this.emit('core:pause-all')
}
resumeAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({files: updatedFiles})
this.emit('core:resume-all')
}
retryAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const filesToRetry = Object.keys(updatedFiles).filter(file => {
return updatedFiles[file].error
})
filesToRetry.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({
files: updatedFiles,
error: null
})
this.emit('core:retry-all', filesToRetry)
const uploadID = this.createUpload(filesToRetry)
return this.runUpload(uploadID)
}
retryUpload (fileID) {
const updatedFiles = Object.assign({}, this.state.files)
const updatedFile = Object.assign({}, updatedFiles[fileID],
{ error: null, isPaused: false }
)
updatedFiles[fileID] = updatedFile
this.setState({
files: updatedFiles
})
this.emit('core:upload-retry', fileID)
const uploadID = this.createUpload([ fileID ])
return this.runUpload(uploadID)
}
reset () {
this.cancelAll()
}
cancelAll () {
this.emit('core:cancel-all')
this.setState({ files: {}, totalProgress: 0 })
}
calculateProgress (data) {
const fileID = data.id
const updatedFiles = Object.assign({}, this.getState().files)
// skip progress event for a file that’s been removed
if (!updatedFiles[fileID]) {
this.log('Trying to set progress for a file that’s not with us anymore: ', fileID)
return
}
const updatedFile = Object.assign({}, updatedFiles[fileID],
Object.assign({}, {
progress: Object.assign({}, updatedFiles[fileID].progress, {
bytesUploaded: data.bytesUploaded,
bytesTotal: data.bytesTotal,
percentage: Math.floor((data.bytesUploaded / data.bytesTotal * 100).toFixed(2))
})
}
))
updatedFiles[data.id] = updatedFile
this.setState({
files: updatedFiles
})
this.calculateTotalProgress()
}
calculateTotalProgress () {
// calculate total progress: the sum of each in-progress file's percentage
// divided by the maximum possible progress (number of in-progress files * 100)
const files = Object.assign({}, this.getState().files)
const inProgress = Object.keys(files).filter((file) => {
return files[file].progress.uploadStarted
})
const progressMax = inProgress.length * 100
let progressAll = 0
inProgress.forEach((file) => {
progressAll = progressAll + files[file].progress.percentage
})
const totalProgress = progressMax === 0 ? 0 : Math.floor((progressAll * 100 / progressMax).toFixed(2))
this.setState({
totalProgress: totalProgress
})
}
/**
* Registers listeners for all global actions, like:
* `file-add`, `file-remove`, `upload-progress`, `reset`
*
*/
actions () {
// this.bus.on('*', (payload) => {
// console.log('emitted: ', this.event)
// console.log('with payload: ', payload)
// })
// stress-test re-rendering
// setInterval(() => {
// this.setState({bla: 'bla'})
// }, 20)
// this.on('core:state-update', (prevState, nextState, patch) => {
// if (this.withDevTools) {
// this.devTools.send('UPPY_STATE_UPDATE', nextState)
// }
// })
this.on('core:error', (error) => {
this.setState({ error })
})
this.on('core:upload-error', (fileID, error) => {
const updatedFiles = Object.assign({}, this.state.files)
const updatedFile = Object.assign({}, updatedFiles[fileID],
{ error: error }
)
updatedFiles[fileID] = updatedFile
this.setState({ files: updatedFiles, error: error })
const fileName = this.state.files[fileID].name
let message = `Failed to upload ${fileName}`
if (typeof error === 'object' && error.message) {
message = { message: message, details: error.message }
}
this.info(message, 'error', 5000)
})
this.on('core:upload', () => {
this.setState({ error: null })
})
this.on('core:file-add', (data) => {
this.addFile(data)
})
this.on('core:file-added', (file) => {
this.generatePreview(file)
})
this.on('core:file-remove', (fileID) => {
this.removeFile(fileID)
})
this.on('core:upload-started', (fileID, upload) => {
const updatedFiles = Object.assign({}, this.getState().files)
const updatedFile = Object.assign({}, updatedFiles[fileID],
Object.assign({}, {
progress: Object.assign({}, updatedFiles[fileID].progress, {
uploadStarted: Date.now(),
uploadComplete: false,
percentage: 0,
bytesUploaded: 0
})
}
))
updatedFiles[fileID] = updatedFile
this.setState({files: updatedFiles})
})
    // upload progress events can occur frequently, especially when you have a good
    // connection to the remote server. Therefore, we are throttling them to
    // prevent excessive function calls.
    // see also: https://github.com/tus/tus-js-client/commit/9940f27b2361fd7e10ba58b09b60d82422183bbb
const throttledCalculateProgress = throttle(this.calculateProgress, 100, {leading: true, trailing: false})
this.on('core:upload-progress', (data) => {
throttledCalculateProgress(data)
})
this.on('core:upload-success', (fileID, uploadResp, uploadURL) => {
const updatedFiles = Object.assign({}, this.getState().files)
const updatedFile = Object.assign({}, updatedFiles[fileID], {
progress: Object.assign({}, updatedFiles[fileID].progress, {
uploadComplete: true,
percentage: 100
}),
uploadURL: uploadURL,
isPaused: false
})
updatedFiles[fileID] = updatedFile
this.setState({
files: updatedFiles
})
this.calculateTotalProgress()
})
this.on('core:update-meta', (data, fileID) => {
this.updateMeta(data, fileID)
})
this.on('core:preprocess-progress', (fileID, progress) => {
const files = Object.assign({}, this.getState().files)
files[fileID] = Object.assign({}, files[fileID], {
progress: Object.assign({}, files[fileID].progress, {
preprocess: progress
})
})
this.setState({ files: files })
})
this.on('core:preprocess-complete', (fileID) => {
const files = Object.assign({}, this.getState().files)
files[fileID] = Object.assign({}, files[fileID], {
progress: Object.assign({}, files[fileID].progress)
})
delete files[fileID].progress.preprocess
this.setState({ files: files })
})
this.on('core:postprocess-progress', (fileID, progress) => {
const files = Object.assign({}, this.getState().files)
files[fileID] = Object.assign({}, files[fileID], {
progress: Object.assign({}, files[fileID].progress, {
postprocess: progress
})
})
this.setState({ files: files })
})
this.on('core:postprocess-complete', (fileID) => {
const files = Object.assign({}, this.getState().files)
files[fileID] = Object.assign({}, files[fileID], {
progress: Object.assign({}, files[fileID].progress)
})
delete files[fileID].progress.postprocess
      // TODO should we set some kind of `fullyComplete` property on the file object
      // so it's easier to see that the file upload is fully complete (including postprocessing),
      // rather than what we have to do now (`uploadComplete && !postprocess`)
this.setState({ files: files })
})
// show informer if offline
if (typeof window !== 'undefined') {
window.addEventListener('online', () => this.updateOnlineStatus())
window.addEventListener('offline', () => this.updateOnlineStatus())
setTimeout(() => this.updateOnlineStatus(), 3000)
}
}
updateOnlineStatus () {
const online =
typeof window.navigator.onLine !== 'undefined'
? window.navigator.onLine
: true
if (!online) {
this.emit('is-offline')
this.info('No internet connection', 'error', 0)
this.wasOffline = true
} else {
this.emit('is-online')
if (this.wasOffline) {
this.emit('back-online')
this.info('Connected!', 'success', 3000)
this.wasOffline = false
}
}
}
getID () {
return this.opts.id
}
/**
* Registers a plugin with Core
*
* @param {Class} Plugin object
* @param {Object} options object that will be passed to Plugin later
* @return {Object} self for chaining
*/
use (Plugin, opts) {
if (typeof Plugin !== 'function') {
let msg = `Expected a plugin class, but got ${Plugin === null ? 'null' : typeof Plugin}.` +
' Please verify that the plugin was imported and spelled correctly.'
throw new TypeError(msg)
}
// Instantiate
const plugin = new Plugin(this, opts)
const pluginId = plugin.id
this.plugins[plugin.type] = this.plugins[plugin.type] || []
if (!pluginId) {
throw new Error('Your plugin must have an id')
}
if (!plugin.type) {
throw new Error('Your plugin must have a type')
}
let existsPluginAlready = this.getPlugin(pluginId)
if (existsPluginAlready) {
let msg = `Already found a plugin named '${existsPluginAlready.id}'.
Tried to use: '${pluginId}'.
Uppy is currently limited to running one of every plugin.
Share your use case with us over at
https://github.com/transloadit/uppy/issues/
if you want us to reconsider.`
throw new Error(msg)
}
this.plugins[plugin.type].push(plugin)
plugin.install()
return this
}
/**
* Find one Plugin by name
*
   * @param {string} name The id of the plugin to look up
*/
getPlugin (name) {
let foundPlugin = false
this.iteratePlugins((plugin) => {
const pluginName = plugin.id
if (pluginName === name) {
foundPlugin = plugin
return false
}
})
return foundPlugin
}
/**
* Iterate through all `use`d plugins
*
   * @param {Function} method Function that is called with each installed plugin
*/
iteratePlugins (method) {
Object.keys(this.plugins).forEach((pluginType) => {
this.plugins[pluginType].forEach(method)
})
}
/**
* Uninstall and remove a plugin.
*
* @param {Plugin} instance The plugin instance to remove.
*/
removePlugin (instance) {
const list = this.plugins[instance.type]
if (instance.uninstall) {
instance.uninstall()
}
const index = list.indexOf(instance)
if (index !== -1) {
list.splice(index, 1)
}
}
/**
* Uninstall all plugins and close down this Uppy instance.
*/
close () {
this.reset()
if (this.withDevTools) {
this.devToolsUnsubscribe()
}
this.iteratePlugins((plugin) => {
plugin.uninstall()
})
if (this.socket) {
this.socket.close()
}
}
/**
* Set info message in `state.info`, so that UI plugins like `Informer`
* can display the message
*
   * @param {string|Object} message Message to be displayed by the informer (an object may carry `message` and `details`)
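   * @param {string} [type] Message type, e.g. 'info', 'error' or 'success' (defaults to 'info')
   * @param {number} [duration] How long to show the message, in milliseconds; 0 keeps it visible until hidden manually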
*/
info (message, type, duration) {
const isComplexMessage = typeof message === 'object'
this.setState({
info: {
isHidden: false,
type: type || 'info',
message: isComplexMessage ? message.message : message,
details: isComplexMessage ? message.details : null
}
})
this.emit('core:info-visible')
window.clearTimeout(this.infoTimeoutID)
if (duration === 0) {
this.infoTimeoutID = undefined
return
}
// hide the informer after `duration` milliseconds
this.infoTimeoutID = setTimeout(this.hideInfo, duration)
}
hideInfo () {
const newInfo = Object.assign({}, this.state.info, {
isHidden: true
})
this.setState({
info: newInfo
})
this.emit('core:info-hidden')
}
/**
* Logs stuff to console, only if `debug` is set to true. Silent in production.
*
* @param {String|Object} msg to log
* @param {String} type optional `error` or `warning`
*/
log (msg, type) {
if (!this.opts.debug) {
return
}
let message = `[Uppy] [${Utils.getTimeStamp()}] ${msg}`
global.uppyLog = global.uppyLog + '\n' + 'DEBUG LOG: ' + msg
if (type === 'error') {
console.error(message)
return
}
if (type === 'warning') {
console.warn(message)
return
}
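    // comparing msg to its template-literal interpolation is only true for plain strings;
    // objects and other values take the else branch below and are dumped via console.dir instead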
if (msg === `${msg}`) {
console.log(message)
} else {
message = `[Uppy] [${Utils.getTimeStamp()}]`
console.log(message)
console.dir(msg)
}
}
initSocket (opts) {
if (!this.socket) {
this.socket = new UppySocket(opts)
}
return this.socket
}
/**
* Initializes actions, installs all plugins (by iterating on them and calling `install`), sets options
*
*/
run () {
this.log('Core is run, initializing actions...')
this.actions()
return this
}
/**
* Restore an upload by its ID.
*/
restore (uploadID) {
this.log(`Core: attempting to restore upload "${uploadID}"`)
if (!this.state.currentUploads[uploadID]) {
this.removeUpload(uploadID)
return Promise.reject(new Error('Nonexistent upload'))
}
return this.runUpload(uploadID)
}
/**
* Create an upload for a bunch of files.
*
* @param {Array<string>} fileIDs File IDs to include in this upload.
* @return {string} ID of this upload.
*/
createUpload (fileIDs) {
const uploadID = cuid()
this.emit('core:upload', {
id: uploadID,
fileIDs: fileIDs
})
this.setState({
currentUploads: Object.assign({}, this.state.currentUploads, {
[uploadID]: {
fileIDs: fileIDs,
step: 0
}
})
})
return uploadID
}
/**
   * Remove an upload, e.g. if it has been canceled or completed.
*
* @param {string} uploadID The ID of the upload.
*/
removeUpload (uploadID) {
const currentUploads = Object.assign({}, this.state.currentUploads)
delete currentUploads[uploadID]
this.setState({
currentUploads: currentUploads
})
}
/**
* Run an upload. This picks up where it left off in case the upload is being restored.
*
* @private
*/
runUpload (uploadID) {
const uploadData = this.state.currentUploads[uploadID]
const fileIDs = uploadData.fileIDs
const restoreStep = uploadData.step
const steps = [
...this.preProcessors,
...this.uploaders,
...this.postProcessors
]
let lastStep = Promise.resolve()
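    // Chain every remaining step sequentially. Before running a step, its index is recorded in
    // `currentUploads`, so a restored upload can skip the steps that already completed.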
steps.forEach((fn, step) => {
// Skip this step if we are restoring and have already completed this step before.
if (step < restoreStep) {
return
}
lastStep = lastStep.then(() => {
const currentUpload = Object.assign({}, this.state.currentUploads[uploadID], {
step: step
})
this.setState({
currentUploads: Object.assign({}, this.state.currentUploads, {
[uploadID]: currentUpload
})
})
// TODO give this the `currentUpload` object as its only parameter maybe?
// Otherwise when more metadata may be added to the upload this would keep getting more parameters
return fn(fileIDs, uploadID)
})
})
// Not returning the `catch`ed promise, because we still want to return a rejected
// promise from this method if the upload failed.
lastStep.catch((err) => {
this.emit('core:error', err)
this.removeUpload(uploadID)
})
return lastStep.then(() => {
this.emit('core:success', fileIDs)
this.removeUpload(uploadID)
})
}
/**
* Start an upload for all the files that are not currently being uploaded.
*
* @return {Promise}
*/
upload () {
if (!this.plugins.uploader) {
this.log('No uploader type plugins are used', 'warning')
}
const isMinNumberOfFilesReached = this.checkMinNumberOfFiles()
if (!isMinNumberOfFilesReached) {
return Promise.reject(new Error('Minimum number of files has not been reached'))
}
const beforeUpload = Promise.resolve()
.then(() => this.opts.onBeforeUpload(this.state.files))
return beforeUpload.catch((err) => {
const message = typeof err === 'object' ? err.message : err
this.info(message, 'error', 5000)
return Promise.reject(new Error(`onBeforeUpload: ${message}`))
}).then(() => {
const waitingFileIDs = []
Object.keys(this.state.files).forEach((fileID) => {
const file = this.getFile(fileID)
if (!file.progress.uploadStarted || file.isRemote) {
waitingFileIDs.push(file.id)
}
})
const uploadID = this.createUpload(waitingFileIDs)
return this.runUpload(uploadID)
})
}
}
module.exports = function (opts) {
return new Uppy(opts)
}
| 1 | 10,091 | how about a default parameter instead? | transloadit-uppy | js |
@@ -624,11 +624,11 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
}
func (r *ReconcileClusterDeployment) reconcileInstallingClusterProvision(cd *hivev1.ClusterDeployment, releaseImage string, logger log.FieldLogger) (reconcile.Result, error) {
- // Return early and stop processing if Agent install strategy is in play. The controllers that
+ // Return early and stop processing if ClusterInstallRef is in play. The controllers that
// handle this portion of the API currently live in the assisted service repo, rather than hive.
// This will hopefully change in the future.
- if cd.Spec.Provisioning != nil && cd.Spec.Provisioning.InstallStrategy != nil && cd.Spec.Provisioning.InstallStrategy.Agent != nil {
- logger.Info("skipping processing of agent install strategy cluster")
+ if cd.Spec.ClusterInstallRef != nil {
+ logger.Info("ClusterInstallRef specified, skipping creation of ClusterProvision")
return reconcile.Result{}, nil
}
| 1 | package clusterdeployment
import (
"context"
"fmt"
"os"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
routev1 "github.com/openshift/api/route/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
librarygocontroller "github.com/openshift/library-go/pkg/controller"
apihelpers "github.com/openshift/hive/apis/helpers"
hivev1 "github.com/openshift/hive/apis/hive/v1"
hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
"github.com/openshift/hive/pkg/constants"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/imageset"
"github.com/openshift/hive/pkg/remoteclient"
k8slabels "github.com/openshift/hive/pkg/util/labels"
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = hivev1.SchemeGroupVersion.WithKind("ClusterDeployment")
const (
ControllerName = hivev1.ClusterDeploymentControllerName
defaultRequeueTime = 10 * time.Second
maxProvisions = 3
platformAuthFailureReason = "PlatformAuthError"
platformAuthSuccessReason = "PlatformAuthSuccess"
clusterImageSetNotFoundReason = "ClusterImageSetNotFound"
clusterImageSetFoundReason = "ClusterImageSetFound"
defaultDNSNotReadyTimeout = 10 * time.Minute
dnsNotReadyReason = "DNSNotReady"
dnsNotReadyTimedoutReason = "DNSNotReadyTimedOut"
dnsUnsupportedPlatformReason = "DNSUnsupportedPlatform"
dnsZoneResourceConflictReason = "DNSZoneResourceConflict"
dnsReadyReason = "DNSReady"
dnsReadyAnnotation = "hive.openshift.io/dnsready"
installAttemptsLimitReachedReason = "InstallAttemptsLimitReached"
installOnlyOnceSetReason = "InstallOnlyOnceSet"
provisionNotStoppedReason = "ProvisionNotStopped"
deleteAfterAnnotation = "hive.openshift.io/delete-after" // contains a duration after which the cluster should be cleaned up.
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
regionUnknown = "unknown"
)
// Add creates a new ClusterDeployment controller and adds it to the manager with default RBAC.
func Add(mgr manager.Manager) error {
logger := log.WithField("controller", ControllerName)
concurrentReconciles, clientRateLimiter, queueRateLimiter, err := controllerutils.GetControllerConfig(mgr.GetClient(), ControllerName)
if err != nil {
logger.WithError(err).Error("could not get controller configurations")
return err
}
return AddToManager(mgr, NewReconciler(mgr, logger, clientRateLimiter), concurrentReconciles, queueRateLimiter)
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager, logger log.FieldLogger, rateLimiter flowcontrol.RateLimiter) reconcile.Reconciler {
r := &ReconcileClusterDeployment{
Client: controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName, &rateLimiter),
scheme: mgr.GetScheme(),
logger: logger,
expectations: controllerutils.NewExpectations(logger),
watchingClusterInstall: map[string]struct{}{},
validateCredentialsForClusterDeployment: controllerutils.ValidateCredentialsForClusterDeployment,
}
r.remoteClusterAPIClientBuilder = func(cd *hivev1.ClusterDeployment) remoteclient.Builder {
return remoteclient.NewBuilder(r.Client, cd, ControllerName)
}
protectedDeleteEnvVar := os.Getenv(constants.ProtectedDeleteEnvVar)
if protectedDelete, err := strconv.ParseBool(protectedDeleteEnvVar); protectedDelete && err == nil {
logger.Info("Protected Delete enabled")
r.protectedDelete = true
}
return r
}
// AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
cdReconciler, ok := r.(*ReconcileClusterDeployment)
if !ok {
return errors.New("reconciler supplied is not a ReconcileClusterDeployment")
}
c, err := controller.New("clusterdeployment-controller", mgr, controller.Options{
Reconciler: r,
MaxConcurrentReconciles: concurrentReconciles,
RateLimiter: rateLimiter,
})
if err != nil {
log.WithField("controller", ControllerName).WithError(err).Error("could not create controller")
return err
}
// Inject watcher to the clusterdeployment reconciler.
controllerutils.InjectWatcher(cdReconciler, c)
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &hivev1.ClusterDeployment{},
clusterInstallIndexFieldName, indexClusterInstall); err != nil {
log.WithField("controller", ControllerName).WithError(err).Error("Error indexing cluster deployment for cluster install")
return err
}
// Watch for changes to ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{})
if err != nil {
log.WithField("controller", ControllerName).WithError(err).Error("Error watching cluster deployment")
return err
}
// Watch for provisions
if err := cdReconciler.watchClusterProvisions(c); err != nil {
return err
}
// Watch for jobs created by a ClusterDeployment:
err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", ControllerName).WithError(err).Error("Error watching cluster deployment job")
return err
}
// Watch for pods created by an install job
err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, handler.EnqueueRequestsFromMapFunc(selectorPodWatchHandler))
if err != nil {
log.WithField("controller", ControllerName).WithError(err).Error("Error watching cluster deployment pods")
return err
}
// Watch for deprovision requests created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeprovision{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", ControllerName).WithError(err).Error("Error watching deprovision request created by cluster deployment")
return err
}
// Watch for dnszones created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.DNSZone{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", ControllerName).WithError(err).Error("Error watching cluster deployment dnszones")
return err
}
// Watch for changes to ClusterSyncs
if err := c.Watch(
&source.Kind{Type: &hiveintv1alpha1.ClusterSync{}},
&handler.EnqueueRequestForOwner{OwnerType: &hivev1.ClusterDeployment{}},
); err != nil {
return errors.Wrap(err, "cannot start watch on ClusterSyncs")
}
return nil
}
var _ reconcile.Reconciler = &ReconcileClusterDeployment{}
// ReconcileClusterDeployment reconciles a ClusterDeployment object
type ReconcileClusterDeployment struct {
client.Client
scheme *runtime.Scheme
logger log.FieldLogger
// watcher allows the reconciler to add new watches
// at runtime.
watcher controllerutils.Watcher
	// This is a set of cluster install contracts that are currently
	// being watched. This allows the reconciler to only add a Watch for
	// these once.
watchingClusterInstall map[string]struct{}
// A TTLCache of clusterprovision creates each clusterdeployment expects to see
expectations controllerutils.ExpectationsInterface
// remoteClusterAPIClientBuilder is a function pointer to the function that gets a builder for building a client
// for the remote cluster's API server
remoteClusterAPIClientBuilder func(cd *hivev1.ClusterDeployment) remoteclient.Builder
// validateCredentialsForClusterDeployment is what this controller will call to validate
// that the platform creds are good (used for testing)
validateCredentialsForClusterDeployment func(client.Client, *hivev1.ClusterDeployment, log.FieldLogger) (bool, error)
protectedDelete bool
}
// Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read
// and what is in the ClusterDeployment.Spec
func (r *ReconcileClusterDeployment) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) {
cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName)
cdLog.Info("reconciling cluster deployment")
recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog)
defer recobsrv.ObserveControllerReconcileTime()
// Fetch the ClusterDeployment instance
cd := &hivev1.ClusterDeployment{}
err := r.Get(context.TODO(), request.NamespacedName, cd)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
cdLog.Info("cluster deployment Not Found")
r.expectations.DeleteExpectations(request.NamespacedName.String())
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
cdLog.WithError(err).Error("Error getting cluster deployment")
return reconcile.Result{}, err
}
// Ensure owner references are correctly set
err = controllerutils.ReconcileOwnerReferences(cd, generateOwnershipUniqueKeys(cd), r, r.scheme, r.logger)
if err != nil {
cdLog.WithError(err).Error("Error reconciling object ownership")
return reconcile.Result{}, err
}
return r.reconcile(request, cd, cdLog)
}
func (r *ReconcileClusterDeployment) SetWatcher(w controllerutils.Watcher) {
r.watcher = w
}
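// generateOwnershipUniqueKeys lists the dependent object types (and the label selectors used to find them)
// whose owner references should point back at this ClusterDeployment; Controlled marks whether the
// reference is expected to be a controlling owner reference.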
func generateOwnershipUniqueKeys(owner hivev1.MetaRuntimeObject) []*controllerutils.OwnershipUniqueKey {
return []*controllerutils.OwnershipUniqueKey{
{
TypeToList: &hivev1.ClusterProvisionList{},
LabelSelector: map[string]string{constants.ClusterDeploymentNameLabel: owner.GetName()},
Controlled: true,
},
{
TypeToList: &corev1.PersistentVolumeClaimList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.PVCTypeLabel: constants.PVCTypeInstallLogs,
},
Controlled: true,
},
{
TypeToList: &batchv1.JobList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.JobTypeLabel: constants.JobTypeImageSet,
},
Controlled: true,
},
{
TypeToList: &hivev1.ClusterDeprovisionList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
},
Controlled: true,
},
{
TypeToList: &hivev1.DNSZoneList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.DNSZoneTypeLabel: constants.DNSZoneTypeChild,
},
Controlled: true,
},
{
TypeToList: &corev1.SecretList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.SecretTypeLabel: constants.SecretTypeMergedPullSecret,
},
Controlled: true,
},
{
TypeToList: &corev1.SecretList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.SecretTypeLabel: constants.SecretTypeKubeConfig,
},
Controlled: false,
},
{
TypeToList: &corev1.SecretList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.SecretTypeLabel: constants.SecretTypeKubeAdminCreds,
},
Controlled: false,
},
}
}
func (r *ReconcileClusterDeployment) addAdditionalKubeconfigCAs(cd *hivev1.ClusterDeployment,
cdLog log.FieldLogger) error {
adminKubeconfigSecret := &corev1.Secret{}
if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name}, adminKubeconfigSecret); err != nil {
cdLog.WithError(err).Error("failed to get admin kubeconfig secret")
return err
}
originalSecret := adminKubeconfigSecret.DeepCopy()
rawData, hasRawData := adminKubeconfigSecret.Data[constants.RawKubeconfigSecretKey]
if !hasRawData {
adminKubeconfigSecret.Data[constants.RawKubeconfigSecretKey] = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
rawData = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
}
var err error
adminKubeconfigSecret.Data[constants.KubeconfigSecretKey], err = controllerutils.AddAdditionalKubeconfigCAs(rawData)
if err != nil {
cdLog.WithError(err).Errorf("error adding additional CAs to admin kubeconfig")
return err
}
if reflect.DeepEqual(originalSecret.Data, adminKubeconfigSecret.Data) {
cdLog.Debug("secret data has not changed, no need to update")
return nil
}
cdLog.Info("admin kubeconfig has been modified, updating")
err = r.Update(context.TODO(), adminKubeconfigSecret)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating admin kubeconfig secret")
return err
}
return nil
}
func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (result reconcile.Result, returnErr error) {
// Set platform label on the ClusterDeployment
if platform := getClusterPlatform(cd); cd.Labels[hivev1.HiveClusterPlatformLabel] != platform {
if cd.Labels == nil {
cd.Labels = make(map[string]string)
}
if cd.Labels[hivev1.HiveClusterPlatformLabel] != "" {
cdLog.Warnf("changing the value of %s from %s to %s", hivev1.HiveClusterPlatformLabel,
cd.Labels[hivev1.HiveClusterPlatformLabel], platform)
}
cd.Labels[hivev1.HiveClusterPlatformLabel] = platform
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set cluster platform label")
}
return reconcile.Result{}, err
}
// Set region label on the ClusterDeployment
if region := getClusterRegion(cd); cd.Spec.Platform.BareMetal == nil && cd.Spec.Platform.AgentBareMetal == nil &&
cd.Labels[hivev1.HiveClusterRegionLabel] != region {
if cd.Labels == nil {
cd.Labels = make(map[string]string)
}
if cd.Labels[hivev1.HiveClusterRegionLabel] != "" {
cdLog.Warnf("changing the value of %s from %s to %s", hivev1.HiveClusterRegionLabel,
cd.Labels[hivev1.HiveClusterRegionLabel], region)
}
cd.Labels[hivev1.HiveClusterRegionLabel] = region
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set cluster region label")
}
return reconcile.Result{}, err
}
if cd.DeletionTimestamp != nil {
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
// Make sure we have no deprovision underway metric even though this was probably cleared when we
// removed the finalizer.
clearDeprovisionUnderwaySecondsMetric(cd, cdLog)
return reconcile.Result{}, nil
}
// Deprovision still underway, report metric for this cluster.
hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.DeletionTimestamp.Time).Seconds())
return r.syncDeletedClusterDeployment(cd, cdLog)
}
// Check for the delete-after annotation, and if the cluster has expired, delete it
deleteAfter, ok := cd.Annotations[deleteAfterAnnotation]
if ok {
cdLog.Debugf("found delete after annotation: %s", deleteAfter)
dur, err := time.ParseDuration(deleteAfter)
if err != nil {
cdLog.WithError(err).WithField("deleteAfter", deleteAfter).Infof("error parsing %s as a duration", deleteAfterAnnotation)
return reconcile.Result{}, fmt.Errorf("error parsing %s as a duration: %v", deleteAfterAnnotation, err)
}
if !cd.CreationTimestamp.IsZero() {
expiry := cd.CreationTimestamp.Add(dur)
cdLog.Debugf("cluster expires at: %s", expiry)
if time.Now().After(expiry) {
cdLog.WithField("expiry", expiry).Info("cluster has expired, issuing delete")
err := r.Delete(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting expired cluster")
}
return reconcile.Result{}, err
}
defer func() {
// We have an expiry time but we're not expired yet. Set requeueAfter to the expiry time
// so that we requeue cluster for deletion once reconcile has completed
result, returnErr = controllerutils.EnsureRequeueAtLeastWithin(
time.Until(cd.CreationTimestamp.Add(dur)),
result,
returnErr,
)
}()
}
}
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
cdLog.Debugf("adding clusterdeployment finalizer")
if err := r.addClusterDeploymentFinalizer(cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer")
return reconcile.Result{}, err
}
metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return reconcile.Result{}, nil
}
if cd.Spec.Installed {
// set installedTimestamp for adopted clusters
if cd.Status.InstalledTimestamp == nil {
cd.Status.InstalledTimestamp = &cd.ObjectMeta.CreationTimestamp
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not set cluster installed timestamp")
return reconcile.Result{Requeue: true}, nil
}
}
// update SyncSetFailedCondition status condition
cdLog.Debug("Check if any syncsets are failing")
if err := r.setSyncSetFailedCondition(cd, cdLog); err != nil {
cdLog.WithError(err).Error("Error updating SyncSetFailedCondition status condition")
return reconcile.Result{}, err
}
switch {
case cd.Spec.Provisioning != nil:
if r, err := r.reconcileInstalledClusterProvision(cd, cdLog); err != nil {
return r, err
}
}
if cd.Spec.ClusterMetadata != nil &&
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name != "" {
if err := r.addAdditionalKubeconfigCAs(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
// Add cluster deployment as additional owner reference to admin secrets
if err := r.addOwnershipToSecret(cd, cdLog, cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name); err != nil {
return reconcile.Result{}, err
}
if cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name != "" {
if err := r.addOwnershipToSecret(cd, cdLog, cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name); err != nil {
return reconcile.Result{}, err
}
}
if cd.Status.WebConsoleURL == "" || cd.Status.APIURL == "" {
return r.setClusterStatusURLs(cd, cdLog)
}
}
return reconcile.Result{}, nil
}
// If the ClusterDeployment is being relocated to another Hive instance, stop any current provisioning and do not
// do any more reconciling.
switch _, relocateStatus, err := controllerutils.IsRelocating(cd); {
case err != nil:
return reconcile.Result{}, errors.Wrap(err, "could not determine relocate status")
case relocateStatus == hivev1.RelocateOutgoing:
result, err := r.stopProvisioning(cd, cdLog)
if result == nil {
result = &reconcile.Result{}
}
return *result, err
}
// Sanity check the platform/cloud credentials.
validCreds, err := r.validatePlatformCreds(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("unable to validate platform credentials")
return reconcile.Result{}, err
}
// Make sure the condition is set properly.
_, err = r.setAuthenticationFailure(cd, validCreds, cdLog)
if err != nil {
cdLog.WithError(err).Error("unable to update clusterdeployment")
return reconcile.Result{}, err
}
// If the platform credentials are no good, return error and go into backoff
authCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.AuthenticationFailureClusterDeploymentCondition)
if authCondition != nil && authCondition.Status == corev1.ConditionTrue {
authError := errors.New(authCondition.Message)
cdLog.WithError(authError).Error("cannot proceed with provision while platform credentials authentication is failing.")
return reconcile.Result{}, authError
}
imageSet, err := r.getClusterImageSet(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("failed to get cluster image set for the clusterdeployment")
return reconcile.Result{}, err
}
releaseImage := r.getReleaseImage(cd, imageSet, cdLog)
cdLog.Debug("loading pull secrets")
pullSecret, err := r.mergePullSecrets(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("Error merging pull secrets")
return reconcile.Result{}, err
}
// Update the pull secret object if required
switch updated, err := r.updatePullSecretInfo(pullSecret, cd, cdLog); {
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "Error updating the merged pull secret")
return reconcile.Result{}, err
case updated:
// The controller will not automatically requeue the cluster deployment
// since the controller is not watching for secrets. So, requeue manually.
return reconcile.Result{Requeue: true}, nil
}
switch result, err := r.resolveInstallerImage(cd, releaseImage, cdLog); {
case err != nil:
return reconcile.Result{}, err
case result != nil:
return *result, nil
}
if !r.expectations.SatisfiedExpectations(request.String()) {
cdLog.Debug("waiting for expectations to be satisfied")
return reconcile.Result{}, nil
}
if cd.Spec.ManageDNS {
dnsZone, err := r.ensureManagedDNSZone(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
if dnsZone == nil {
// dnsNotReady condition was set.
if isSet, _ := isDNSNotReadyConditionSet(cd); isSet {
// dnsNotReadyReason is why the dnsNotReady condition was set, therefore requeue so that we check to see if it times out.
// add defaultRequeueTime to avoid the race condition where the controller is reconciled at the exact time of the timeout (unlikely, but possible).
return reconcile.Result{RequeueAfter: defaultDNSNotReadyTimeout + defaultRequeueTime}, nil
}
return reconcile.Result{}, nil
}
updated, err := r.setDNSDelayMetric(cd, dnsZone, cdLog)
if updated || err != nil {
return reconcile.Result{}, err
}
}
switch {
case cd.Spec.Provisioning != nil:
return r.reconcileInstallingClusterProvision(cd, releaseImage, cdLog)
case cd.Spec.ClusterInstallRef != nil:
return r.reconcileInstallingClusterInstall(cd, cdLog)
default:
return reconcile.Result{}, errors.New("invalid provisioning configuration")
}
}
func (r *ReconcileClusterDeployment) reconcileInstallingClusterProvision(cd *hivev1.ClusterDeployment, releaseImage string, logger log.FieldLogger) (reconcile.Result, error) {
// Return early and stop processing if Agent install strategy is in play. The controllers that
// handle this portion of the API currently live in the assisted service repo, rather than hive.
// This will hopefully change in the future.
if cd.Spec.Provisioning != nil && cd.Spec.Provisioning.InstallStrategy != nil && cd.Spec.Provisioning.InstallStrategy.Agent != nil {
logger.Info("skipping processing of agent install strategy cluster")
return reconcile.Result{}, nil
}
if cd.Status.ProvisionRef == nil {
return r.startNewProvision(cd, releaseImage, logger)
}
return r.reconcileExistingProvision(cd, logger)
}
func (r *ReconcileClusterDeployment) reconcileInstalledClusterProvision(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) {
// delete failed provisions which are more than 7 days old
existingProvisions, err := r.existingProvisions(cd, logger)
if err != nil {
return reconcile.Result{}, err
}
r.deleteOldFailedProvisions(existingProvisions, logger)
logger.Debug("cluster is already installed, no processing of provision needed")
r.cleanupInstallLogPVC(cd, logger)
return reconcile.Result{}, nil
}
func (r *ReconcileClusterDeployment) reconcileInstallingClusterInstall(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) {
ref := cd.Spec.ClusterInstallRef
gvk := schema.GroupVersionKind{
Group: ref.Group,
Version: ref.Version,
Kind: ref.Kind,
}
if err := r.watchClusterInstall(gvk, logger); err != nil {
logger.WithField("gvk", gvk.String()).WithError(err).Error("failed to watch for cluster install contract")
return reconcile.Result{}, err
}
return r.reconcileExistingInstallingClusterInstall(cd, logger)
}
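// isDNSNotReadyConditionSet reports whether the DNSNotReady condition is currently True with a
// not-ready or timed-out reason, and returns the condition itself for further inspection.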
func isDNSNotReadyConditionSet(cd *hivev1.ClusterDeployment) (bool, *hivev1.ClusterDeploymentCondition) {
dnsNotReadyCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.DNSNotReadyCondition)
return dnsNotReadyCondition != nil &&
dnsNotReadyCondition.Status == corev1.ConditionTrue &&
(dnsNotReadyCondition.Reason == dnsNotReadyReason || dnsNotReadyCondition.Reason == dnsNotReadyTimedoutReason),
dnsNotReadyCondition
}
func addEnvVarIfFound(name string, envVars []corev1.EnvVar) []corev1.EnvVar {
value, found := os.LookupEnv(name)
if !found {
return envVars
}
tmp := corev1.EnvVar{
Name: name,
Value: value,
}
return append(envVars, tmp)
}
// getReleaseImage looks for a release image in clusterdeployment or its corresponding imageset in the following order:
// 1 - specified in the cluster deployment spec.images.releaseImage
// 2 - referenced in the cluster deployment spec.imageSet
func (r *ReconcileClusterDeployment) getReleaseImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, cdLog log.FieldLogger) string {
if cd.Spec.Provisioning != nil && cd.Spec.Provisioning.ReleaseImage != "" {
return cd.Spec.Provisioning.ReleaseImage
}
if imageSet != nil {
return imageSet.Spec.ReleaseImage
}
return ""
}
func (r *ReconcileClusterDeployment) getClusterImageSet(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.ClusterImageSet, error) {
imageSetKey := types.NamespacedName{}
switch {
case cd.Spec.Provisioning != nil:
imageSetKey.Name = getClusterImageSetFromProvisioning(cd)
if imageSetKey.Name == "" {
return nil, nil
}
case cd.Spec.ClusterInstallRef != nil:
isName, err := getClusterImageSetFromClusterInstall(r.Client, cd)
if err != nil {
return nil, err
}
imageSetKey.Name = isName
default:
cdLog.Warning("clusterdeployment references no clusterimageset")
if err := r.setImageSetNotFoundCondition(cd, "unknown", true, cdLog); err != nil {
return nil, err
}
}
imageSet := &hivev1.ClusterImageSet{}
err := r.Get(context.TODO(), imageSetKey, imageSet)
if apierrors.IsNotFound(err) {
cdLog.WithField("clusterimageset", imageSetKey.Name).
Warning("clusterdeployment references non-existent clusterimageset")
if err := r.setImageSetNotFoundCondition(cd, imageSetKey.Name, true, cdLog); err != nil {
return nil, err
}
return nil, err
}
if err != nil {
cdLog.WithError(err).WithField("clusterimageset", imageSetKey.Name).
Error("unexpected error retrieving clusterimageset")
return nil, err
}
if err := r.setImageSetNotFoundCondition(cd, imageSetKey.Name, false, cdLog); err != nil {
return nil, err
}
return imageSet, nil
}
func (r *ReconcileClusterDeployment) statusUpdate(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update clusterdeployment status")
}
return err
}
const (
imagesResolvedReason = "ImagesResolved"
imagesResolvedMsg = "Images required for cluster deployment installations are resolved"
)
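// resolveInstallerImage makes sure the installer and CLI images are resolved for the given release image,
// creating or cleaning up the imageset job as needed and keeping the InstallImagesNotResolved condition in
// sync. A non-nil Result tells the caller to stop reconciling for now.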
func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDeployment, releaseImage string, cdLog log.FieldLogger) (*reconcile.Result, error) {
areImagesResolved := cd.Status.InstallerImage != nil && cd.Status.CLIImage != nil
jobKey := client.ObjectKey{Namespace: cd.Namespace, Name: imageset.GetImageSetJobName(cd.Name)}
jobLog := cdLog.WithField("job", jobKey.Name)
existingJob := &batchv1.Job{}
switch err := r.Get(context.Background(), jobKey, existingJob); {
// The job does not exist. If the images have been resolved, continue reconciling. Otherwise, create the job.
case apierrors.IsNotFound(err):
if areImagesResolved {
return nil, r.setInstallImagesNotResolvedCondition(cd, corev1.ConditionFalse, imagesResolvedReason, imagesResolvedMsg, cdLog)
}
job := imageset.GenerateImageSetJob(cd, releaseImage, controllerutils.InstallServiceAccountName,
os.Getenv("HTTP_PROXY"),
os.Getenv("HTTPS_PROXY"),
os.Getenv("NO_PROXY"))
cdLog.WithField("derivedObject", job.Name).Debug("Setting labels on derived object")
job.Labels = k8slabels.AddLabel(job.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
job.Labels = k8slabels.AddLabel(job.Labels, constants.JobTypeLabel, constants.JobTypeImageSet)
if err := controllerutil.SetControllerReference(cd, job, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on job")
return nil, err
}
jobLog.WithField("releaseImage", releaseImage).Info("creating imageset job")
err = controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error setting up service account and role")
return nil, err
}
if err := r.Create(context.TODO(), job); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating job")
return nil, err
}
// kickstartDuration calculates the delay between creation of cd and start of imageset job
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to imageset job seconds")
metricImageSetDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
return &reconcile.Result{}, nil
// There was an error getting the job. Return the error.
case err != nil:
jobLog.WithError(err).Error("cannot get job")
return nil, err
// The job exists and is in the process of getting deleted. If the images were resolved, then continue reconciling.
// If the images were not resolved, requeue and wait for the delete to complete.
case !existingJob.DeletionTimestamp.IsZero():
if areImagesResolved {
return nil, r.setInstallImagesNotResolvedCondition(cd, corev1.ConditionFalse, imagesResolvedReason, imagesResolvedMsg, cdLog)
}
jobLog.Debug("imageset job is being deleted. Will recreate once deleted")
return &reconcile.Result{RequeueAfter: defaultRequeueTime}, err
// If job exists and is finished, delete it. If the images were not resolved, then the job will be re-created.
case controllerutils.IsFinished(existingJob):
jobLog.WithField("successful", controllerutils.IsSuccessful(existingJob)).
Warning("Finished job found. Deleting.")
if err := r.Delete(
context.Background(),
existingJob,
client.PropagationPolicy(metav1.DeletePropagationForeground),
); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot delete imageset job")
return nil, err
}
if areImagesResolved {
return nil, r.setInstallImagesNotResolvedCondition(cd, corev1.ConditionFalse, imagesResolvedReason, imagesResolvedMsg, cdLog)
}
// the job has failed to update the images and therefore
// we need to update the InstallImagesResolvedCondition to reflect why it failed.
for _, jcond := range existingJob.Status.Conditions {
if jcond.Type != batchv1.JobFailed {
continue
}
msg := fmt.Sprintf("The job %s/%s to resolve the image failed because of (%s) %s",
existingJob.Namespace, existingJob.Name,
jcond.Reason, jcond.Message,
)
return &reconcile.Result{}, r.setInstallImagesNotResolvedCondition(cd, corev1.ConditionTrue, "JobToResolveImagesFailed", msg, cdLog)
}
return &reconcile.Result{}, nil
// The job exists and is in progress. Wait for the job to finish before doing any more reconciliation.
default:
jobLog.Debug("job exists and is in progress")
return &reconcile.Result{}, nil
}
}
func (r *ReconcileClusterDeployment) setInstallImagesNotResolvedCondition(cd *hivev1.ClusterDeployment, status corev1.ConditionStatus, reason string, message string, cdLog log.FieldLogger) error {
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.InstallImagesNotResolvedCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if !changed {
return nil
}
cd.Status.Conditions = conditions
cdLog.Debugf("setting InstallImagesNotResolvedCondition to %v", status)
return r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setDNSNotReadyCondition(cd *hivev1.ClusterDeployment, status corev1.ConditionStatus, reason string, message string, cdLog log.FieldLogger) error {
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.DNSNotReadyCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if !changed {
return nil
}
cd.Status.Conditions = conditions
cdLog.Debugf("setting DNSNotReadyCondition to %v", status)
return r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setAuthenticationFailure(cd *hivev1.ClusterDeployment, authSuccessful bool, cdLog log.FieldLogger) (bool, error) {
var status corev1.ConditionStatus
var reason, message string
if authSuccessful {
status = corev1.ConditionFalse
reason = platformAuthSuccessReason
message = "Platform credentials passed authentication check"
} else {
status = corev1.ConditionTrue
reason = platformAuthFailureReason
message = "Platform credentials failed authentication check"
}
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.AuthenticationFailureClusterDeploymentCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if !changed {
return false, nil
}
cd.Status.Conditions = conditions
return changed, r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setInstallLaunchErrorCondition(cd *hivev1.ClusterDeployment, status corev1.ConditionStatus, reason string, message string, cdLog log.FieldLogger) error {
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.InstallLaunchErrorCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if !changed {
return nil
}
cd.Status.Conditions = conditions
cdLog.WithField("status", status).Debug("setting InstallLaunchErrorCondition")
return r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setDeprovisionLaunchErrorCondition(cd *hivev1.ClusterDeployment, status corev1.ConditionStatus, reason string, message string, cdLog log.FieldLogger) error {
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.DeprovisionLaunchErrorCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if !changed {
return nil
}
cd.Status.Conditions = conditions
cdLog.WithField("status", status).Debug("setting DeprovisionLaunchErrorCondition")
return r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setImageSetNotFoundCondition(cd *hivev1.ClusterDeployment, name string, isNotFound bool, cdLog log.FieldLogger) error {
status := corev1.ConditionFalse
reason := clusterImageSetFoundReason
message := fmt.Sprintf("ClusterImageSet %s is available", name)
if isNotFound {
status = corev1.ConditionTrue
reason = clusterImageSetNotFoundReason
message = fmt.Sprintf("ClusterImageSet %s is not available", name)
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.ClusterImageSetNotFoundCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever)
if !changed {
return nil
}
cdLog.Infof("setting ClusterImageSetNotFoundCondition to %v", status)
cd.Status.Conditions = conds
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update status conditions")
}
return err
}
// setClusterStatusURLs fetches the openshift console route from the remote cluster and uses it to determine
// the correct APIURL and WebConsoleURL, and then set them in the Status. Typically only called if these Status fields
// are unset.
func (r *ReconcileClusterDeployment) setClusterStatusURLs(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
server, err := remoteclient.InitialURL(r.Client, cd)
if err != nil {
cdLog.WithError(err).Error("could not get API URL from kubeconfig")
return reconcile.Result{}, err
}
cdLog.Debugf("found cluster API URL in kubeconfig: %s", server)
cd.Status.APIURL = server
remoteClient, unreachable, requeue := remoteclient.ConnectToRemoteCluster(
cd,
r.remoteClusterAPIClientBuilder(cd),
r.Client,
cdLog,
)
if unreachable {
return reconcile.Result{Requeue: requeue}, nil
}
routeObject := &routev1.Route{}
if err := remoteClient.Get(
context.Background(),
client.ObjectKey{Namespace: "openshift-console", Name: "console"},
routeObject,
); err != nil {
cdLog.WithError(err).Info("error fetching remote route object")
return reconcile.Result{Requeue: true}, nil
}
cdLog.Debugf("read remote route object: %s", routeObject)
cd.Status.WebConsoleURL = "https://" + routeObject.Spec.Host
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not set cluster status URLs")
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, nil
}
// ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone
// linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted.
// Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it
// not working as intended.
func (r *ReconcileClusterDeployment) ensureManagedDNSZoneDeleted(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (gone bool, returnErr error) {
if !cd.Spec.ManageDNS {
return true, nil
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
switch err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone); {
case apierrors.IsNotFound(err):
cdLog.Debug("dnszone has been removed from storage")
return true, nil
case err != nil:
cdLog.WithError(err).Error("error looking up managed dnszone")
return false, err
case !dnsZone.DeletionTimestamp.IsZero():
cdLog.Debug("dnszone has been deleted but is still in storage")
return false, nil
}
if err := r.Delete(context.TODO(), dnsZone); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting managed dnszone")
return false, err
}
return false, nil
}
func (r *ReconcileClusterDeployment) ensureClusterDeprovisioned(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (deprovisioned bool, returnErr error) {
// Skips creation of deprovision request if PreserveOnDelete is true and cluster is installed
if cd.Spec.PreserveOnDelete {
if cd.Spec.Installed {
cdLog.Warn("skipping creation of deprovisioning request for installed cluster due to PreserveOnDelete=true")
return true, nil
}
// Overriding PreserveOnDelete because we might have deleted the cluster deployment before it finished
// installing, which can cause AWS resources to leak
cdLog.Info("PreserveOnDelete=true but creating deprovisioning request as cluster was never successfully provisioned")
}
if cd.Spec.ClusterMetadata == nil {
cdLog.Warn("skipping uninstall for cluster that never had clusterID set")
return true, nil
}
// We do not yet support deprovision for BareMetal, for now skip deprovision and remove finalizer.
if cd.Spec.Platform.BareMetal != nil {
cdLog.Info("skipping deprovision for BareMetal cluster, removing finalizer")
return true, nil
}
if cd.Spec.ClusterInstallRef != nil {
cdLog.Info("skipping deprovision as it should be done by deleting the obj in cluster install reference")
return true, nil
}
// Generate a deprovision request
request, err := generateDeprovision(cd)
if err != nil {
cdLog.WithError(err).Error("error generating deprovision request")
return false, err
}
cdLog.WithField("derivedObject", request.Name).Debug("Setting label on derived object")
request.Labels = k8slabels.AddLabel(request.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
err = controllerutil.SetControllerReference(cd, request, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on deprovision request: %v", err)
return false, err
}
// Check if deprovision request already exists:
existingRequest := &hivev1.ClusterDeprovision{}
switch err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Name, Namespace: cd.Namespace}, existingRequest); {
case apierrors.IsNotFound(err):
cdLog.Info("creating deprovision request for cluster deployment")
switch err = r.Create(context.TODO(), request); {
case apierrors.IsAlreadyExists(err):
cdLog.Info("deprovision request already exists")
return false, nil
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating deprovision request")
// Check if namespace is terminated, if so we can give up, remove the finalizer, and let
// the cluster go away.
ns := &corev1.Namespace{}
err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Namespace}, ns)
if err != nil {
cdLog.WithError(err).Error("error checking for deletionTimestamp on namespace")
return false, err
}
if ns.DeletionTimestamp != nil {
cdLog.Warn("detected a namespace deleted before deprovision request could be created, giving up on deprovision and removing finalizer")
return true, err
}
return false, err
default:
return false, nil
}
case err != nil:
cdLog.WithError(err).Error("error getting deprovision request")
return false, err
}
authenticationFailureCondition := controllerutils.FindClusterDeprovisionCondition(existingRequest.Status.Conditions, hivev1.AuthenticationFailureClusterDeprovisionCondition)
if authenticationFailureCondition != nil {
err := r.setDeprovisionLaunchErrorCondition(cd,
authenticationFailureCondition.Status,
authenticationFailureCondition.Reason,
authenticationFailureCondition.Message,
cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update deprovisionLaunchErrorCondition")
return false, err
}
}
if !existingRequest.Status.Completed {
cdLog.Debug("deprovision request not yet completed")
return false, nil
}
return true, nil
}
func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
switch _, relocateStatus, err := controllerutils.IsRelocating(cd); {
case err != nil:
cdLog.WithError(err).Error("could not determine relocate status")
return reconcile.Result{}, errors.Wrap(err, "could not determine relocate status")
case relocateStatus == hivev1.RelocateComplete:
cdLog.Info("clusterdeployment relocated, removing finalizer")
err := r.removeClusterDeploymentFinalizer(cd, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
case relocateStatus != "":
cdLog.Debug("ClusterDeployment is in the middle of a relocate. Wait until relocate has been completed or aborted before doing finalization.")
return reconcile.Result{}, nil
}
if controllerutils.IsDeleteProtected(cd) {
cdLog.Error("deprovision blocked for ClusterDeployment with protected delete on")
return reconcile.Result{}, nil
}
dnsZoneGone, err := r.ensureManagedDNSZoneDeleted(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
// Wait for outstanding provision to be removed before creating deprovision request
switch result, err := r.stopProvisioning(cd, cdLog); {
case result != nil:
return *result, err
case err != nil:
return reconcile.Result{}, err
}
deprovisioned, err := r.ensureClusterDeprovisioned(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
switch {
case !deprovisioned:
return reconcile.Result{}, nil
case !dnsZoneGone:
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
default:
cdLog.Infof("DNSZone gone and deprovision request completed, removing finalizer")
if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
}
func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
cd = cd.DeepCopy()
controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision)
return r.Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
cd = cd.DeepCopy()
controllerutils.DeleteFinalizer(cd, hivev1.FinalizerDeprovision)
if err := r.Update(context.TODO(), cd); err != nil {
return err
}
clearDeprovisionUnderwaySecondsMetric(cd, cdLog)
// Increment the clusters deleted counter:
metricClustersDeleted.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return nil
}
// setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation
// to when the dnszone became ready, and set a metric to report the delay.
// Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered.
func (r *ReconcileClusterDeployment) setDNSDelayMetric(cd *hivev1.ClusterDeployment, dnsZone *hivev1.DNSZone, cdLog log.FieldLogger) (bool, error) {
modified := false
initializeAnnotations(cd)
if _, ok := cd.Annotations[dnsReadyAnnotation]; ok {
// already have recorded the dnsdelay metric
return modified, nil
}
readyTimestamp := dnsReadyTransitionTime(dnsZone)
if readyTimestamp == nil {
msg := "did not find timestamp for when dnszone became ready"
cdLog.WithField("dnszone", dnsZone.Name).Error(msg)
return modified, fmt.Errorf(msg)
}
dnsDelayDuration := readyTimestamp.Sub(cd.CreationTimestamp.Time)
cdLog.WithField("duration", dnsDelayDuration.Seconds()).Info("DNS ready")
cd.Annotations[dnsReadyAnnotation] = dnsDelayDuration.String()
if err := r.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to save annotation marking DNS becoming ready")
return modified, err
}
modified = true
metricDNSDelaySeconds.Observe(float64(dnsDelayDuration.Seconds()))
return modified, nil
}
func (r *ReconcileClusterDeployment) ensureManagedDNSZone(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.DNSZone, error) {
switch p := cd.Spec.Platform; {
case p.AWS != nil:
case p.GCP != nil:
case p.Azure != nil:
default:
cdLog.Error("cluster deployment platform does not support managed DNS")
if err := r.setDNSNotReadyCondition(cd, corev1.ConditionTrue, dnsUnsupportedPlatformReason, "Managed DNS is not supported on specified platform", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition for DNSUnsupportedPlatform reason")
return nil, err
}
return nil, errors.New("managed DNS not supported on platform")
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
logger := cdLog.WithField("zone", dnsZoneNamespacedName.String())
switch err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone); {
case apierrors.IsNotFound(err):
logger.Info("creating new DNSZone for cluster deployment")
return nil, r.createManagedDNSZone(cd, logger)
case err != nil:
logger.WithError(err).Error("failed to fetch DNS zone")
return nil, err
}
if !metav1.IsControlledBy(dnsZone, cd) {
cdLog.Error("DNS zone already exists but is not owned by cluster deployment")
if err := r.setDNSNotReadyCondition(cd, corev1.ConditionTrue, dnsZoneResourceConflictReason, "Existing DNS zone not owned by cluster deployment", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, errors.New("Existing unowned DNS zone")
}
availableCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
insufficientCredentialsCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.InsufficientCredentialsCondition)
authenticationFailureCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.AuthenticationFailureCondition)
var (
status corev1.ConditionStatus
reason, message string
)
switch {
case availableCondition != nil && availableCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionFalse
reason = dnsReadyReason
message = "DNS Zone available"
case insufficientCredentialsCondition != nil && insufficientCredentialsCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionTrue
reason = "InsufficientCredentials"
message = insufficientCredentialsCondition.Message
case authenticationFailureCondition != nil && authenticationFailureCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionTrue
reason = "AuthenticationFailure"
message = authenticationFailureCondition.Message
default:
status = corev1.ConditionTrue
reason = dnsNotReadyReason
message = "DNS Zone not yet available"
isDNSNotReadyConditionSet, dnsNotReadyCondition := isDNSNotReadyConditionSet(cd)
if isDNSNotReadyConditionSet {
// Timeout if it has been in this state for longer than allowed.
timeSinceLastTransition := time.Since(dnsNotReadyCondition.LastTransitionTime.Time)
if timeSinceLastTransition >= defaultDNSNotReadyTimeout {
// We've timed out, set the dnsNotReadyTimedoutReason for the DNSNotReady condition
cdLog.WithField("timeout", defaultDNSNotReadyTimeout).Warn("Timed out waiting on managed dns creation")
reason = dnsNotReadyTimedoutReason
message = "DNS Zone timed out in DNSNotReady state"
}
}
}
if err := r.setDNSNotReadyCondition(cd, status, reason, message, cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
if reason != dnsReadyReason {
return nil, nil
}
return dnsZone, nil
}
func (r *ReconcileClusterDeployment) createManagedDNSZone(cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
dnsZone := &hivev1.DNSZone{
ObjectMeta: metav1.ObjectMeta{
Name: controllerutils.DNSZoneName(cd.Name),
Namespace: cd.Namespace,
},
Spec: hivev1.DNSZoneSpec{
Zone: cd.Spec.BaseDomain,
LinkToParentDomain: true,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
additionalTags := make([]hivev1.AWSResourceTag, 0, len(cd.Spec.Platform.AWS.UserTags))
for k, v := range cd.Spec.Platform.AWS.UserTags {
additionalTags = append(additionalTags, hivev1.AWSResourceTag{Key: k, Value: v})
}
region := ""
if strings.HasPrefix(cd.Spec.Platform.AWS.Region, constants.AWSChinaRegionPrefix) {
region = constants.AWSChinaRoute53Region
}
dnsZone.Spec.AWS = &hivev1.AWSDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.AWS.CredentialsSecretRef,
CredentialsAssumeRole: cd.Spec.Platform.AWS.CredentialsAssumeRole,
AdditionalTags: additionalTags,
Region: region,
}
case cd.Spec.Platform.GCP != nil:
dnsZone.Spec.GCP = &hivev1.GCPDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.GCP.CredentialsSecretRef,
}
case cd.Spec.Platform.Azure != nil:
dnsZone.Spec.Azure = &hivev1.AzureDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.Azure.CredentialsSecretRef,
ResourceGroupName: cd.Spec.Platform.Azure.BaseDomainResourceGroupName,
}
}
logger.WithField("derivedObject", dnsZone.Name).Debug("Setting labels on derived object")
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.DNSZoneTypeLabel, constants.DNSZoneTypeChild)
if err := controllerutil.SetControllerReference(cd, dnsZone, r.scheme); err != nil {
logger.WithError(err).Error("error setting controller reference on dnszone")
return err
}
err := r.Create(context.TODO(), dnsZone)
if err != nil {
logger.WithError(err).Log(controllerutils.LogLevel(err), "cannot create DNS zone")
return err
}
logger.Info("dns zone created")
return nil
}
func selectorPodWatchHandler(a client.Object) []reconcile.Request {
retval := []reconcile.Request{}
pod := a.(*corev1.Pod)
if pod == nil {
// Wasn't a Pod, bail out. This should not happen.
log.Errorf("Error converting MapObject.Object to Pod. Value: %+v", a)
return retval
}
if pod.Labels == nil {
return retval
}
cdName, ok := pod.Labels[constants.ClusterDeploymentNameLabel]
if !ok {
return retval
}
retval = append(retval, reconcile.Request{NamespacedName: types.NamespacedName{
Name: cdName,
Namespace: pod.Namespace,
}})
return retval
}
// GetInstallLogsPVCName returns the expected name of the persistent volume claim for cluster install failure logs.
// TODO: Remove this function and all calls to it. It's being left here for compatibility until the install log PVs are removed from all the installs.
func GetInstallLogsPVCName(cd *hivev1.ClusterDeployment) string {
return apihelpers.GetResourceName(cd.Name, "install-logs")
}
// cleanupInstallLogPVC will immediately delete the PVC (should it exist) if the cluster was installed successfully, without retries.
// If there were retries, it will delete the PVC if it has been more than 7 days since the job was completed.
// TODO: Remove this function and all calls to it. It's being left here for compatibility until the install log PVs are removed from all the installs.
func (r *ReconcileClusterDeployment) cleanupInstallLogPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
if !cd.Spec.Installed {
return nil
}
pvc := &corev1.PersistentVolumeClaim{}
err := r.Get(context.TODO(), types.NamespacedName{Name: GetInstallLogsPVCName(cd), Namespace: cd.Namespace}, pvc)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
cdLog.WithError(err).Error("error looking up install logs PVC")
return err
}
	// Also check if we've already deleted it; the PVC won't be deleted until the install pod is, and that is
	// retained for one day.
if pvc.DeletionTimestamp != nil {
return nil
}
pvcLog := cdLog.WithField("pvc", pvc.Name)
switch {
case cd.Status.InstallRestarts == 0:
pvcLog.Info("deleting logs PersistentVolumeClaim for installed cluster with no restarts")
case cd.Status.InstalledTimestamp == nil:
pvcLog.Warn("deleting logs PersistentVolumeClaim for cluster with errors but no installed timestamp")
// Otherwise, delete if more than 7 days have passed.
case time.Since(cd.Status.InstalledTimestamp.Time) > (7 * 24 * time.Hour):
pvcLog.Info("deleting logs PersistentVolumeClaim for cluster that was installed after restarts more than 7 days ago")
default:
cdLog.WithField("pvc", pvc.Name).Debug("preserving logs PersistentVolumeClaim for cluster with install restarts for 7 days")
return nil
}
if err := r.Delete(context.TODO(), pvc); err != nil {
pvcLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting install logs PVC")
return err
}
return nil
}
func generateDeprovision(cd *hivev1.ClusterDeployment) (*hivev1.ClusterDeprovision, error) {
req := &hivev1.ClusterDeprovision{
ObjectMeta: metav1.ObjectMeta{
Name: cd.Name,
Namespace: cd.Namespace,
},
Spec: hivev1.ClusterDeprovisionSpec{
InfraID: cd.Spec.ClusterMetadata.InfraID,
ClusterID: cd.Spec.ClusterMetadata.ClusterID,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
req.Spec.Platform.AWS = &hivev1.AWSClusterDeprovision{
Region: cd.Spec.Platform.AWS.Region,
CredentialsSecretRef: &cd.Spec.Platform.AWS.CredentialsSecretRef,
CredentialsAssumeRole: cd.Spec.Platform.AWS.CredentialsAssumeRole,
}
case cd.Spec.Platform.Azure != nil:
req.Spec.Platform.Azure = &hivev1.AzureClusterDeprovision{
CredentialsSecretRef: &cd.Spec.Platform.Azure.CredentialsSecretRef,
}
case cd.Spec.Platform.GCP != nil:
req.Spec.Platform.GCP = &hivev1.GCPClusterDeprovision{
Region: cd.Spec.Platform.GCP.Region,
CredentialsSecretRef: &cd.Spec.Platform.GCP.CredentialsSecretRef,
}
case cd.Spec.Platform.OpenStack != nil:
req.Spec.Platform.OpenStack = &hivev1.OpenStackClusterDeprovision{
Cloud: cd.Spec.Platform.OpenStack.Cloud,
CredentialsSecretRef: &cd.Spec.Platform.OpenStack.CredentialsSecretRef,
CertificatesSecretRef: cd.Spec.Platform.OpenStack.CertificatesSecretRef,
}
case cd.Spec.Platform.VSphere != nil:
req.Spec.Platform.VSphere = &hivev1.VSphereClusterDeprovision{
CredentialsSecretRef: cd.Spec.Platform.VSphere.CredentialsSecretRef,
CertificatesSecretRef: cd.Spec.Platform.VSphere.CertificatesSecretRef,
VCenter: cd.Spec.Platform.VSphere.VCenter,
}
case cd.Spec.Platform.Ovirt != nil:
req.Spec.Platform.Ovirt = &hivev1.OvirtClusterDeprovision{
CredentialsSecretRef: cd.Spec.Platform.Ovirt.CredentialsSecretRef,
CertificatesSecretRef: cd.Spec.Platform.Ovirt.CertificatesSecretRef,
ClusterID: cd.Spec.Platform.Ovirt.ClusterID,
}
default:
return nil, errors.New("unsupported cloud provider for deprovision")
}
return req, nil
}
func generatePullSecretObj(pullSecret string, pullSecretName string, cd *hivev1.ClusterDeployment) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName,
Namespace: cd.Namespace,
},
Type: corev1.SecretTypeDockerConfigJson,
StringData: map[string]string{
corev1.DockerConfigJsonKey: pullSecret,
},
}
}
func dnsReadyTransitionTime(dnsZone *hivev1.DNSZone) *time.Time {
readyCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
if readyCondition != nil && readyCondition.Status == corev1.ConditionTrue {
return &readyCondition.LastTransitionTime.Time
}
return nil
}
func clearDeprovisionUnderwaySecondsMetric(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) {
cleared := hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.Delete(map[string]string{
"cluster_deployment": cd.Name,
"namespace": cd.Namespace,
"cluster_type": hivemetrics.GetClusterDeploymentType(cd),
})
if cleared {
cdLog.Debug("cleared metric: %v", hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds)
}
}
// initializeAnnotations initializes the annotations map if it is not already set
func initializeAnnotations(cd *hivev1.ClusterDeployment) {
if cd.Annotations == nil {
cd.Annotations = map[string]string{}
}
}
// mergePullSecrets merges the global pull secret JSON (if defined) with the cluster's pull secret JSON (if defined)
// An error will be returned if neither is defined
func (r *ReconcileClusterDeployment) mergePullSecrets(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (string, error) {
var localPullSecret string
var err error
	// For readability, refer to the pull secret in the cluster deployment config as the local pull secret
if cd.Spec.PullSecretRef != nil {
localPullSecret, err = controllerutils.LoadSecretData(r.Client, cd.Spec.PullSecretRef.Name, cd.Namespace, corev1.DockerConfigJsonKey)
if err != nil {
return "", errors.Wrap(err, "local pull secret could not be retrieved")
}
}
	// Check for a global pull secret name in the environment, as it comes from the hive config
globalPullSecretName := os.Getenv(constants.GlobalPullSecret)
var globalPullSecret string
if len(globalPullSecretName) != 0 {
globalPullSecret, err = controllerutils.LoadSecretData(r.Client, globalPullSecretName, controllerutils.GetHiveNamespace(), corev1.DockerConfigJsonKey)
if err != nil {
return "", errors.Wrap(err, "global pull secret could not be retrieved")
}
}
switch {
case globalPullSecret != "" && localPullSecret != "":
// Merge local pullSecret and globalPullSecret. If both pull secrets have same registry name
// then the merged pull secret will have registry secret from local pull secret
pullSecret, err := controllerutils.MergeJsons(globalPullSecret, localPullSecret, cdLog)
if err != nil {
errMsg := "unable to merge global pull secret with local pull secret"
cdLog.WithError(err).Error(errMsg)
return "", errors.Wrap(err, errMsg)
}
return pullSecret, nil
case globalPullSecret != "":
return globalPullSecret, nil
case localPullSecret != "":
return localPullSecret, nil
default:
errMsg := "clusterdeployment must specify pull secret since hiveconfig does not specify a global pull secret"
cdLog.Error(errMsg)
return "", errors.New(errMsg)
}
}
// updatePullSecretInfo creates or updates the merged pull secret for the clusterdeployment.
// It returns true when the merged pull secret has been created or updated.
func (r *ReconcileClusterDeployment) updatePullSecretInfo(pullSecret string, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
var err error
pullSecretObjExists := true
existingPullSecretObj := &corev1.Secret{}
mergedSecretName := constants.GetMergedPullSecretName(cd)
err = r.Get(context.TODO(), types.NamespacedName{Name: mergedSecretName, Namespace: cd.Namespace}, existingPullSecretObj)
if err != nil {
if apierrors.IsNotFound(err) {
cdLog.Info("Existing pull secret object not found")
pullSecretObjExists = false
} else {
return false, errors.Wrap(err, "Error getting pull secret from cluster deployment")
}
}
if pullSecretObjExists {
existingPullSecret, ok := existingPullSecretObj.Data[corev1.DockerConfigJsonKey]
if !ok {
return false, fmt.Errorf("Pull secret %s did not contain key %s", mergedSecretName, corev1.DockerConfigJsonKey)
}
if string(existingPullSecret) == pullSecret {
cdLog.Debug("Existing and the new merged pull secret are same")
return false, nil
}
cdLog.Info("Existing merged pull secret hash did not match with latest merged pull secret")
existingPullSecretObj.Data[corev1.DockerConfigJsonKey] = []byte(pullSecret)
err = r.Update(context.TODO(), existingPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error updating merged pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Updated the merged pull secret object successfully")
} else {
// create a new pull secret object
newPullSecretObj := generatePullSecretObj(
pullSecret,
mergedSecretName,
cd,
)
cdLog.WithField("derivedObject", newPullSecretObj.Name).Debug("Setting labels on derived object")
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.SecretTypeLabel, constants.SecretTypeMergedPullSecret)
err = controllerutil.SetControllerReference(cd, newPullSecretObj, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on new merged pull secret: %v", err)
return false, err
}
err = r.Create(context.TODO(), newPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error creating new pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Created the merged pull secret object successfully")
}
return true, nil
}
func calculateNextProvisionTime(failureTime time.Time, retries int, cdLog log.FieldLogger) time.Time {
// (2^currentRetries) * 60 seconds up to a max of 24 hours.
const sleepCap = 24 * time.Hour
const retryCap = 11 // log_2_(24*60)
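	// e.g. retries=3 -> failureTime + 8m, retries=10 -> failureTime + ~17h; from retries=11 on,
	// 2^retries minutes would exceed 24h (2^11 = 2048m > 1440m), so the delay is capped at sleepCap.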
if retries >= retryCap {
return failureTime.Add(sleepCap)
}
return failureTime.Add((1 << uint(retries)) * time.Minute)
}
func (r *ReconcileClusterDeployment) existingProvisions(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) ([]*hivev1.ClusterProvision, error) {
provisionList := &hivev1.ClusterProvisionList{}
if err := r.List(
context.TODO(),
provisionList,
client.InNamespace(cd.Namespace),
client.MatchingLabels(map[string]string{constants.ClusterDeploymentNameLabel: cd.Name}),
); err != nil {
cdLog.WithError(err).Warn("could not list provisions for clusterdeployment")
return nil, err
}
provisions := make([]*hivev1.ClusterProvision, len(provisionList.Items))
for i := range provisionList.Items {
provisions[i] = &provisionList.Items[i]
}
return provisions, nil
}
func (r *ReconcileClusterDeployment) getFirstProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) *hivev1.ClusterProvision {
provisions, err := r.existingProvisions(cd, cdLog)
if err != nil {
return nil
}
for _, provision := range provisions {
if provision.Spec.Attempt == 0 {
return provision
}
}
cdLog.Warn("could not find the first provision for clusterdeployment")
return nil
}
func (r *ReconcileClusterDeployment) adoptProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) error {
pLog := cdLog.WithField("provision", provision.Name)
cd.Status.ProvisionRef = &corev1.LocalObjectReference{Name: provision.Name}
if cd.Status.InstallStartedTimestamp == nil {
n := metav1.Now()
cd.Status.InstallStartedTimestamp = &n
}
if err := r.Status().Update(context.TODO(), cd); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "could not adopt provision")
return err
}
pLog.Info("adopted provision")
return nil
}
func (r *ReconcileClusterDeployment) deleteStaleProvisions(provs []*hivev1.ClusterProvision, cdLog log.FieldLogger) {
// Cap the number of existing provisions. Always keep the earliest provision as
// it is used to determine the total time that it took to install. Take off
// one extra to make room for the new provision being started.
amountToDelete := len(provs) - maxProvisions
if amountToDelete <= 0 {
return
}
cdLog.Infof("Deleting %d old provisions", amountToDelete)
sort.Slice(provs, func(i, j int) bool { return provs[i].Spec.Attempt < provs[j].Spec.Attempt })
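	// provs[0] is the earliest attempt after sorting; keep it and delete the next amountToDelete provisions.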
for _, provision := range provs[1 : amountToDelete+1] {
pLog := cdLog.WithField("provision", provision.Name)
pLog.Info("Deleting old provision")
if err := r.Delete(context.TODO(), provision); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to delete old provision")
}
}
}
// deleteOldFailedProvisions deletes the failed provisions which are more than 7 days old
func (r *ReconcileClusterDeployment) deleteOldFailedProvisions(provs []*hivev1.ClusterProvision, cdLog log.FieldLogger) {
cdLog.Debugf("Deleting failed provisions which are more than 7 days old")
for _, provision := range provs {
if provision.Spec.Stage == hivev1.ClusterProvisionStageFailed && time.Since(provision.CreationTimestamp.Time) > (7*24*time.Hour) {
pLog := cdLog.WithField("provision", provision.Name)
pLog.Info("Deleting failed provision")
if err := r.Delete(context.TODO(), provision); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to delete failed provision")
}
}
}
}
// validatePlatformCreds ensure the platform/cloud credentials are at least good enough to authenticate with
func (r *ReconcileClusterDeployment) validatePlatformCreds(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (bool, error) {
return r.validateCredentialsForClusterDeployment(r.Client, cd, logger)
}
// checkForFailedSync returns true if it finds that the ClusterSync has the Failed condition set
func checkForFailedSync(clusterSync *hiveintv1alpha1.ClusterSync) bool {
for _, cond := range clusterSync.Status.Conditions {
if cond.Type == hiveintv1alpha1.ClusterSyncFailed {
return cond.Status == corev1.ConditionTrue
}
}
return false
}
// setSyncSetFailedCondition updates the hivev1.SyncSetFailedCondition
func (r *ReconcileClusterDeployment) setSyncSetFailedCondition(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
var (
status corev1.ConditionStatus
reason, message string
)
clusterSync := &hiveintv1alpha1.ClusterSync{}
switch err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}, clusterSync); {
case apierrors.IsNotFound(err):
if paused, err := strconv.ParseBool(cd.Annotations[constants.SyncsetPauseAnnotation]); err == nil && paused {
cdLog.Info("SyncSet is paused. ClusterSync will not be created")
status = corev1.ConditionTrue
reason = "SyncSetPaused"
message = "SyncSet is paused. ClusterSync will not be created"
} else {
cdLog.Info("ClusterSync has not yet been created")
status = corev1.ConditionTrue
reason = "MissingClusterSync"
message = "ClusterSync has not yet been created"
}
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not get ClusterSync")
return err
case checkForFailedSync(clusterSync):
status = corev1.ConditionTrue
reason = "SyncSetApplyFailure"
message = "One of the SyncSet applies has failed"
default:
status = corev1.ConditionFalse
reason = "SyncSetApplySuccess"
message = "SyncSet apply is successful"
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.SyncSetFailedCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if !changed {
return nil
}
cd.Status.Conditions = conds
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating syncset failed condition")
return err
}
return nil
}
// addOwnershipToSecret adds cluster deployment as an additional non-controlling owner to secret
func (r *ReconcileClusterDeployment) addOwnershipToSecret(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger, name string) error {
cdLog = cdLog.WithField("secret", name)
secret := &corev1.Secret{}
if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: name}, secret); err != nil {
cdLog.WithError(err).Error("failed to get secret")
return err
}
labelAdded := false
// Add the label for cluster deployment for reconciling later, and add the owner reference
if secret.Labels[constants.ClusterDeploymentNameLabel] != cd.Name {
cdLog.Debug("Setting label on derived object")
secret.Labels = k8slabels.AddLabel(secret.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
labelAdded = true
}
cdRef := metav1.OwnerReference{
APIVersion: cd.APIVersion,
Kind: cd.Kind,
Name: cd.Name,
UID: cd.UID,
BlockOwnerDeletion: pointer.BoolPtr(true),
}
cdRefChanged := librarygocontroller.EnsureOwnerRef(secret, cdRef)
if cdRefChanged {
cdLog.Debug("ownership added for cluster deployment")
}
if cdRefChanged || labelAdded {
cdLog.Info("secret has been modified, updating")
if err := r.Update(context.TODO(), secret); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating secret")
return err
}
}
return nil
}
// getClusterPlatform returns the platform of a given ClusterDeployment
func getClusterPlatform(cd *hivev1.ClusterDeployment) string {
switch {
case cd.Spec.Platform.AWS != nil:
return constants.PlatformAWS
case cd.Spec.Platform.Azure != nil:
return constants.PlatformAzure
case cd.Spec.Platform.GCP != nil:
return constants.PlatformGCP
case cd.Spec.Platform.OpenStack != nil:
return constants.PlatformOpenStack
case cd.Spec.Platform.VSphere != nil:
return constants.PlatformVSphere
case cd.Spec.Platform.BareMetal != nil:
return constants.PlatformBaremetal
case cd.Spec.Platform.AgentBareMetal != nil:
return constants.PlatformAgentBaremetal
}
return constants.PlatformUnknown
}
// getClusterRegion returns the region of a given ClusterDeployment
func getClusterRegion(cd *hivev1.ClusterDeployment) string {
switch {
case cd.Spec.Platform.AWS != nil:
return cd.Spec.Platform.AWS.Region
case cd.Spec.Platform.Azure != nil:
return cd.Spec.Platform.Azure.Region
case cd.Spec.Platform.GCP != nil:
return cd.Spec.Platform.GCP.Region
}
return regionUnknown
}
| 1 | 17,646 | i don't think we need to add duplicate check for clusterinstallref here, the function already assumes that it was invoked for clusterprovision | openshift-hive | go |
@@ -159,7 +159,10 @@ class HybridTaskCascadeRoIHead(CascadeRoIHead):
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
- if self.with_semantic and 'bbox' in self.semantic_fusion:
+
+        # bbox_feats.shape[0] > 0 means the number of proposals is not 0.
+ if self.with_semantic and 'bbox' in self.semantic_fusion and \
+ bbox_feats.shape[0] > 0:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: | 1 | import numpy as np
import torch
import torch.nn.functional as F
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from .cascade_roi_head import CascadeRoIHead
@HEADS.register_module()
class HybridTaskCascadeRoIHead(CascadeRoIHead):
"""Hybrid task cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1901.07518
"""
def __init__(self,
num_stages,
stage_loss_weights,
semantic_roi_extractor=None,
semantic_head=None,
semantic_fusion=('bbox', 'mask'),
interleaved=True,
mask_info_flow=True,
**kwargs):
super(HybridTaskCascadeRoIHead,
self).__init__(num_stages, stage_loss_weights, **kwargs)
assert self.with_bbox
assert not self.with_shared_head # shared head is not supported
if semantic_head is not None:
self.semantic_roi_extractor = build_roi_extractor(
semantic_roi_extractor)
self.semantic_head = build_head(semantic_head)
self.semantic_fusion = semantic_fusion
self.interleaved = interleaved
self.mask_info_flow = mask_info_flow
@property
def with_semantic(self):
"""bool: whether the head has semantic head"""
if hasattr(self, 'semantic_head') and self.semantic_head is not None:
return True
else:
return False
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
outs = ()
# semantic head
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
# bbox heads
rois = bbox2roi([proposals])
for i in range(self.num_stages):
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic_feat)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask heads
if self.with_mask:
mask_rois = rois[:100]
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats += mask_semantic_feat
last_feat = None
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
outs = outs + (mask_pred, )
return outs
def _bbox_forward_train(self,
stage,
x,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
semantic_feat=None):
"""Run forward function and calculate loss for box head in training."""
bbox_head = self.bbox_head[stage]
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(
stage, x, rois, semantic_feat=semantic_feat)
bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(
loss_bbox=loss_bbox,
rois=rois,
bbox_targets=bbox_targets,
)
return bbox_results
def _mask_forward_train(self,
stage,
x,
sampling_results,
gt_masks,
rcnn_train_cfg,
semantic_feat=None):
"""Run forward function and calculate loss for mask head in
training."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
pos_rois)
# semantic feature fusion
# element-wise sum for original features and pooled semantic features
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
pos_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
# mask information flow
# forward all previous mask heads to obtain last_feat, and fuse it
# with the normal mask feature
if self.mask_info_flow:
last_feat = None
for i in range(stage):
last_feat = self.mask_head[i](
mask_feats, last_feat, return_logits=False)
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
else:
mask_pred = mask_head(mask_feats, return_feat=False)
mask_targets = mask_head.get_targets(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
mask_results = dict(loss_mask=loss_mask)
return mask_results
def _bbox_forward(self, stage, x, rois, semantic_feat=None):
"""Box head forward function used in both training and testing."""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
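        # fuse pooled semantic features into the box features by element-wise sum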
if self.with_semantic and 'bbox' in self.semantic_fusion:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
bbox_semantic_feat = F.adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats += bbox_semantic_feat
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
return bbox_results
def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
"""Mask head forward function for testing."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_rois = bbox2roi([bboxes])
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
mask_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
if self.mask_info_flow:
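            # mask information flow: run the mask heads of all previous stages to obtain
            # last_feat for this stage's head and accumulate their predictions into the output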
last_feat = None
last_pred = None
for i in range(stage):
mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)
if last_pred is not None:
mask_pred = mask_pred + last_pred
last_pred = mask_pred
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
if last_pred is not None:
mask_pred = mask_pred + last_pred
else:
mask_pred = mask_head(mask_feats)
return mask_pred
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_semantic_seg=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposal_list (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None, list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None, Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# semantic segmentation part
# 2 outputs: segmentation prediction and embedded features
losses = dict()
if self.with_semantic:
semantic_pred, semantic_feat = self.semantic_head(x)
loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
losses['loss_semantic_seg'] = loss_seg
else:
semantic_feat = None
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg[i]
lw = self.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
bbox_assigner = self.bbox_assigner[i]
bbox_sampler = self.bbox_sampler[i]
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[j],
gt_bboxes[j],
gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = \
self._bbox_forward_train(
i, x, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg, semantic_feat)
roi_labels = bbox_results['bbox_targets'][0]
for name, value in bbox_results['loss_bbox'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
# interleaved execution: use regressed bboxes by the box branch
# to train the mask branch
if self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
# re-assign and sample 512 RoIs from 512 RoIs
sampling_results = []
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j],
gt_bboxes_ignore[j], gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
mask_results = self._mask_forward_train(
i, x, sampling_results, gt_masks, rcnn_train_cfg,
semantic_feat)
for name, value in mask_results['loss_mask'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# refine bboxes (same as Cascade R-CNN)
if i < self.num_stages - 1 and not self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
return losses
def simple_test(self, x, proposal_list, img_metas, rescale=False):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
proposal_list (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
img_metas (list[dict]): Meta information of images.
rescale (bool): Whether to rescale the results to
                the original image. Default: False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has mask branch,
it contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
num_imgs = len(proposal_list)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg
rois = bbox2roi(proposal_list)
if rois.shape[0] == 0:
# There is no proposal in the whole batch
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.bbox_head[-1].num_classes)
]] * num_imgs
if self.with_mask:
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
results = list(zip(bbox_results, segm_results))
else:
results = bbox_results
return results
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic_feat)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposal_list)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
refine_rois_list = []
for j in range(num_imgs):
if rois[j].shape[0] > 0:
bbox_label = cls_score[j][:, :-1].argmax(dim=1)
refine_rois = bbox_head.regress_by_class(
rois[j], bbox_label, bbox_pred[j], img_metas[j])
refine_rois_list.append(refine_rois)
rois = torch.cat(refine_rois_list)
# average scores of each image by stages
cls_score = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(num_imgs)
]
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(num_imgs):
det_bbox, det_label = self.bbox_head[-1].get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
bbox_result = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head[-1].num_classes)
for i in range(num_imgs)
]
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
else:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i]
for i in range(num_imgs)
]
mask_rois = bbox2roi(_bboxes)
aug_masks = []
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats += mask_semantic_feat
last_feat = None
num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
# split batch mask prediction back to each image
mask_pred = mask_pred.split(num_bbox_per_img, 0)
aug_masks.append(
[mask.sigmoid().cpu().numpy() for mask in mask_pred])
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[]
for _ in range(self.mask_head[-1].num_classes)])
else:
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(
aug_mask, [[img_metas[i]]] * self.num_stages,
rcnn_test_cfg)
segm_result = self.mask_head[-1].get_seg_masks(
merged_mask, _bboxes[i], det_labels[i],
rcnn_test_cfg, ori_shapes[i], scale_factors[i],
rescale)
segm_results.append(segm_result)
ms_segm_result['ensemble'] = segm_results
if self.with_mask:
results = list(
zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
else:
results = ms_bbox_result['ensemble']
return results
def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
if self.with_semantic:
semantic_feats = [
self.semantic_head(feat)[1] for feat in img_feats
]
else:
semantic_feats = [None] * len(img_metas)
rcnn_test_cfg = self.test_cfg
aug_bboxes = []
aug_scores = []
for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
# "ms" in variable names means multi-stage
ms_scores = []
rois = bbox2roi([proposals])
if rois.shape[0] == 0:
# There is no proposal in the single image
aug_bboxes.append(rois.new_zeros(0, 4))
aug_scores.append(rois.new_zeros(0, 1))
continue
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic)
ms_scores.append(bbox_results['cls_score'])
if i < self.num_stages - 1:
bbox_label = bbox_results['cls_score'].argmax(dim=1)
rois = bbox_head.regress_by_class(
rois, bbox_label, bbox_results['bbox_pred'],
img_meta[0])
cls_score = sum(ms_scores) / float(len(ms_scores))
bboxes, scores = self.bbox_head[-1].get_bboxes(
rois,
cls_score,
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
bbox_result = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
if self.with_mask:
if det_bboxes.shape[0] == 0:
segm_result = [[[]
for _ in range(self.mask_head[-1].num_classes)]
]
else:
aug_masks = []
aug_img_metas = []
for x, img_meta, semantic in zip(img_feats, img_metas,
semantic_feats):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip, flip_direction)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor[-1](
x[:len(self.mask_roi_extractor[-1].featmap_strides)],
mask_rois)
if self.with_semantic:
semantic_feat = semantic
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[
-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
last_feat = None
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(
mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
aug_img_metas.append(img_meta)
merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
rcnn_test_cfg,
ori_shape,
scale_factor=1.0,
rescale=False)
return [(bbox_result, segm_result)]
else:
return [bbox_result]
| 1 | 25,548 | \`bbox_feats.shape[0] > 0\` requires the number of proposal is not 0. | open-mmlab-mmdetection | py |
@@ -202,9 +202,8 @@ public class RewriteManifestsAction
.createDataset(Lists.transform(manifests, ManifestFile::path), Encoders.STRING())
.toDF("manifest");
- String entriesMetadataTable = metadataTableName(MetadataTableType.ENTRIES);
- Dataset<Row> manifestEntryDF = spark.read().format("iceberg")
- .load(entriesMetadataTable)
+ Dataset<Row> manifestEntryDF = BaseSparkAction.loadMetadataTable(spark, table.name(), table().location(),
+ MetadataTableType.ENTRIES)
.filter("status < 2") // select only live entries
.selectExpr("input_file_name() as manifest", "snapshot_id", "sequence_number", "data_file");
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.actions;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.ManifestWriter;
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.RewriteManifests;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.spark.SparkDataFile;
import org.apache.iceberg.spark.SparkUtil;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapPartitionsFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.internal.SQLConf;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An action that rewrites manifests in a distributed manner and co-locates metadata for partitions.
* <p>
* By default, this action rewrites all manifests for the current partition spec and writes the result
* to the metadata folder. The behavior can be modified by passing a custom predicate to {@link #rewriteIf(Predicate)}
* and a custom spec id to {@link #specId(int)}. In addition, there is a way to configure a custom location
* for new manifests via {@link #stagingLocation}.
*/
public class RewriteManifestsAction
extends BaseSnapshotUpdateAction<RewriteManifestsAction, RewriteManifestsActionResult> {
private static final Logger LOG = LoggerFactory.getLogger(RewriteManifestsAction.class);
private final SparkSession spark;
private final JavaSparkContext sparkContext;
private final Encoder<ManifestFile> manifestEncoder;
private final Table table;
private final int formatVersion;
private final FileIO fileIO;
private final long targetManifestSizeBytes;
private PartitionSpec spec = null;
private Predicate<ManifestFile> predicate = manifest -> true;
private String stagingLocation = null;
private boolean useCaching = true;
RewriteManifestsAction(SparkSession spark, Table table) {
this.spark = spark;
this.sparkContext = new JavaSparkContext(spark.sparkContext());
this.manifestEncoder = Encoders.javaSerialization(ManifestFile.class);
this.table = table;
this.spec = table.spec();
this.targetManifestSizeBytes = PropertyUtil.propertyAsLong(
table.properties(),
TableProperties.MANIFEST_TARGET_SIZE_BYTES,
TableProperties.MANIFEST_TARGET_SIZE_BYTES_DEFAULT);
this.fileIO = SparkUtil.serializableFileIO(table);
// default the staging location to the metadata location
TableOperations ops = ((HasTableOperations) table).operations();
Path metadataFilePath = new Path(ops.metadataFileLocation("file"));
this.stagingLocation = metadataFilePath.getParent().toString();
// use the current table format version for new manifests
this.formatVersion = ops.current().formatVersion();
}
@Override
protected RewriteManifestsAction self() {
return this;
}
@Override
protected Table table() {
return table;
}
public RewriteManifestsAction specId(int specId) {
    Preconditions.checkArgument(table.specs().containsKey(specId), "Invalid spec id %s", specId);
this.spec = table.specs().get(specId);
return this;
}
/**
* Rewrites only manifests that match the given predicate.
*
* @param newPredicate a predicate
* @return this for method chaining
*/
public RewriteManifestsAction rewriteIf(Predicate<ManifestFile> newPredicate) {
this.predicate = newPredicate;
return this;
}
/**
* Passes a location where the manifests should be written.
*
* @param newStagingLocation a staging location
* @return this for method chaining
*/
public RewriteManifestsAction stagingLocation(String newStagingLocation) {
this.stagingLocation = newStagingLocation;
return this;
}
/**
* Configures whether the action should cache manifest entries used in multiple jobs.
*
* @param newUseCaching a flag whether to use caching
* @return this for method chaining
*/
public RewriteManifestsAction useCaching(boolean newUseCaching) {
this.useCaching = newUseCaching;
return this;
}
@Override
public RewriteManifestsActionResult execute() {
List<ManifestFile> matchingManifests = findMatchingManifests();
if (matchingManifests.isEmpty()) {
return RewriteManifestsActionResult.empty();
}
long totalSizeBytes = 0L;
int numEntries = 0;
for (ManifestFile manifest : matchingManifests) {
ValidationException.check(hasFileCounts(manifest), "No file counts in manifest: %s", manifest.path());
totalSizeBytes += manifest.length();
numEntries += manifest.addedFilesCount() + manifest.existingFilesCount() + manifest.deletedFilesCount();
}
int targetNumManifests = targetNumManifests(totalSizeBytes);
int targetNumManifestEntries = targetNumManifestEntries(numEntries, targetNumManifests);
Dataset<Row> manifestEntryDF = buildManifestEntryDF(matchingManifests);
List<ManifestFile> newManifests;
if (spec.fields().size() < 1) {
newManifests = writeManifestsForUnpartitionedTable(manifestEntryDF, targetNumManifests);
} else {
newManifests = writeManifestsForPartitionedTable(manifestEntryDF, targetNumManifests, targetNumManifestEntries);
}
replaceManifests(matchingManifests, newManifests);
return new RewriteManifestsActionResult(matchingManifests, newManifests);
}
private Dataset<Row> buildManifestEntryDF(List<ManifestFile> manifests) {
Dataset<Row> manifestDF = spark
.createDataset(Lists.transform(manifests, ManifestFile::path), Encoders.STRING())
.toDF("manifest");
String entriesMetadataTable = metadataTableName(MetadataTableType.ENTRIES);
Dataset<Row> manifestEntryDF = spark.read().format("iceberg")
.load(entriesMetadataTable)
.filter("status < 2") // select only live entries
.selectExpr("input_file_name() as manifest", "snapshot_id", "sequence_number", "data_file");
Column joinCond = manifestDF.col("manifest").equalTo(manifestEntryDF.col("manifest"));
return manifestEntryDF
.join(manifestDF, joinCond, "left_semi")
.select("snapshot_id", "sequence_number", "data_file");
}
private List<ManifestFile> writeManifestsForUnpartitionedTable(Dataset<Row> manifestEntryDF, int numManifests) {
Broadcast<FileIO> io = sparkContext.broadcast(fileIO);
StructType sparkType = (StructType) manifestEntryDF.schema().apply("data_file").dataType();
// we rely only on the target number of manifests for unpartitioned tables
// as we should not worry about having too much metadata per partition
long maxNumManifestEntries = Long.MAX_VALUE;
return manifestEntryDF
.repartition(numManifests)
.mapPartitions(
toManifests(io, maxNumManifestEntries, stagingLocation, formatVersion, spec, sparkType),
manifestEncoder
)
.collectAsList();
}
private List<ManifestFile> writeManifestsForPartitionedTable(
Dataset<Row> manifestEntryDF, int numManifests,
int targetNumManifestEntries) {
Broadcast<FileIO> io = sparkContext.broadcast(fileIO);
StructType sparkType = (StructType) manifestEntryDF.schema().apply("data_file").dataType();
// we allow the actual size of manifests to be 10% higher if the estimation is not precise enough
long maxNumManifestEntries = (long) (1.1 * targetNumManifestEntries);
return withReusableDS(manifestEntryDF, df -> {
Column partitionColumn = df.col("data_file.partition");
return df.repartitionByRange(numManifests, partitionColumn)
.sortWithinPartitions(partitionColumn)
.mapPartitions(
toManifests(io, maxNumManifestEntries, stagingLocation, formatVersion, spec, sparkType),
manifestEncoder
)
.collectAsList();
});
}
private <T, U> U withReusableDS(Dataset<T> ds, Function<Dataset<T>, U> func) {
Dataset<T> reusableDS;
if (useCaching) {
reusableDS = ds.cache();
} else {
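      // materialize the dataset through a shuffle (identity map over numShufflePartitions)
      // so it can be reused by multiple jobs without caching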
int parallelism = SQLConf.get().numShufflePartitions();
reusableDS = ds.repartition(parallelism).map((MapFunction<T, T>) value -> value, ds.exprEnc());
}
try {
return func.apply(reusableDS);
} finally {
if (useCaching) {
reusableDS.unpersist(false);
}
}
}
private List<ManifestFile> findMatchingManifests() {
Snapshot currentSnapshot = table.currentSnapshot();
if (currentSnapshot == null) {
return ImmutableList.of();
}
return currentSnapshot.dataManifests().stream()
.filter(manifest -> manifest.partitionSpecId() == spec.specId() && predicate.test(manifest))
.collect(Collectors.toList());
}
private int targetNumManifests(long totalSizeBytes) {
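    // ceiling division: how many target-sized manifests are needed to hold totalSizeBytes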
return (int) ((totalSizeBytes + targetManifestSizeBytes - 1) / targetManifestSizeBytes);
}
private int targetNumManifestEntries(int numEntries, int numManifests) {
return (numEntries + numManifests - 1) / numManifests;
}
private boolean hasFileCounts(ManifestFile manifest) {
return manifest.addedFilesCount() != null &&
manifest.existingFilesCount() != null &&
manifest.deletedFilesCount() != null;
}
private void replaceManifests(Iterable<ManifestFile> deletedManifests, Iterable<ManifestFile> addedManifests) {
try {
boolean snapshotIdInheritanceEnabled = PropertyUtil.propertyAsBoolean(
table.properties(),
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED,
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT);
RewriteManifests rewriteManifests = table.rewriteManifests();
deletedManifests.forEach(rewriteManifests::deleteManifest);
addedManifests.forEach(rewriteManifests::addManifest);
commit(rewriteManifests);
if (!snapshotIdInheritanceEnabled) {
// delete new manifests as they were rewritten before the commit
deleteFiles(Iterables.transform(addedManifests, ManifestFile::path));
}
} catch (Exception e) {
// delete all new manifests because the rewrite failed
deleteFiles(Iterables.transform(addedManifests, ManifestFile::path));
throw e;
}
}
private void deleteFiles(Iterable<String> locations) {
Tasks.foreach(locations)
.noRetry()
.suppressFailureWhenFinished()
.onFailure((location, exc) -> LOG.warn("Failed to delete: {}", location, exc))
.run(fileIO::deleteFile);
}
private static ManifestFile writeManifest(
List<Row> rows, int startIndex, int endIndex, Broadcast<FileIO> io,
String location, int format, PartitionSpec spec, StructType sparkType) throws IOException {
String manifestName = "optimized-m-" + UUID.randomUUID();
Path manifestPath = new Path(location, manifestName);
OutputFile outputFile = io.value().newOutputFile(FileFormat.AVRO.addExtension(manifestPath.toString()));
Types.StructType dataFileType = DataFile.getType(spec.partitionType());
SparkDataFile wrapper = new SparkDataFile(dataFileType, sparkType);
ManifestWriter writer = ManifestFiles.write(format, spec, outputFile, null);
try {
for (int index = startIndex; index < endIndex; index++) {
Row row = rows.get(index);
long snapshotId = row.getLong(0);
long sequenceNumber = row.getLong(1);
Row file = row.getStruct(2);
writer.existing(wrapper.wrap(file), snapshotId, sequenceNumber);
}
} finally {
writer.close();
}
return writer.toManifestFile();
}
private static MapPartitionsFunction<Row, ManifestFile> toManifests(
Broadcast<FileIO> io, long maxNumManifestEntries, String location,
int format, PartitionSpec spec, StructType sparkType) {
return (MapPartitionsFunction<Row, ManifestFile>) rows -> {
List<Row> rowsAsList = Lists.newArrayList(rows);
if (rowsAsList.isEmpty()) {
return Collections.emptyIterator();
}
List<ManifestFile> manifests = Lists.newArrayList();
if (rowsAsList.size() <= maxNumManifestEntries) {
manifests.add(writeManifest(rowsAsList, 0, rowsAsList.size(), io, location, format, spec, sparkType));
} else {
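        // the partition holds more entries than the target estimate allows, so split them into two manifests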
int midIndex = rowsAsList.size() / 2;
manifests.add(writeManifest(rowsAsList, 0, midIndex, io, location, format, spec, sparkType));
manifests.add(writeManifest(rowsAsList, midIndex, rowsAsList.size(), io, location, format, spec, sparkType));
}
return manifests.iterator();
};
}
}
| 1 | 27,335 | Same here. Any way to fit on one line? | apache-iceberg | java |
@@ -170,6 +170,9 @@ bool TestShard::commitLogs(std::unique_ptr<LogIterator> iter) {
data_.emplace_back(currLogId_, log.toString());
VLOG(1) << idStr_ << "Write: " << log << ", LogId: " << currLogId_
<< " state machine log size: " << data_.size();
+ if (log.startsWith("set ")) {
+ singleRegister_ = std::stoi(log.subpiece(4).toString());
+ }
break;
}
} | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/raftex/test/TestShard.h"
#include "kvstore/raftex/RaftexService.h"
#include "kvstore/wal/FileBasedWal.h"
#include "kvstore/raftex/Host.h"
namespace nebula {
namespace raftex {
namespace test {
std::string encodeLearner(const HostAddr& addr) {
std::string str;
CommandType type = CommandType::ADD_LEARNER;
str.append(reinterpret_cast<const char*>(&type), 1);
str.append(reinterpret_cast<const char*>(&addr), sizeof(HostAddr));
return str;
}
HostAddr decodeLearner(const folly::StringPiece& log) {
HostAddr learner;
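    // skip the leading command-type byte, then copy the two HostAddr fields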
memcpy(&learner.first, log.begin() + 1, sizeof(learner.first));
memcpy(&learner.second, log.begin() + 1 + sizeof(learner.first), sizeof(learner.second));
return learner;
}
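// Test helper for atomic ops: a log starting with 'T' is accepted and returned without
// the prefix; anything else yields an empty string.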
std::string compareAndSet(const std::string& log) {
switch (log[0]) {
case 'T':
return log.substr(1);
default:
return std::string();
}
}
std::string encodeTransferLeader(const HostAddr& addr) {
std::string str;
CommandType type = CommandType::TRANSFER_LEADER;
str.append(reinterpret_cast<const char*>(&type), 1);
str.append(reinterpret_cast<const char*>(&addr), sizeof(HostAddr));
return str;
}
HostAddr decodeTransferLeader(const folly::StringPiece& log) {
HostAddr leader;
memcpy(&leader.first, log.begin() + 1, sizeof(leader.first));
memcpy(&leader.second, log.begin() + 1 + sizeof(leader.first), sizeof(leader.second));
return leader;
}
std::string encodeSnapshotRow(LogID logId, const std::string& row) {
std::string rawData;
rawData.reserve(sizeof(LogID) + row.size());
rawData.append(reinterpret_cast<const char*>(&logId), sizeof(logId));
rawData.append(row.data(), row.size());
return rawData;
}
std::pair<LogID, std::string> decodeSnapshotRow(const std::string& rawData) {
LogID id = *reinterpret_cast<const LogID*>(rawData.data());
auto str = rawData.substr(sizeof(LogID));
return std::make_pair(id, std::move(str));
}
std::string encodeAddPeer(const HostAddr& addr) {
std::string str;
CommandType type = CommandType::ADD_PEER;
str.append(reinterpret_cast<const char*>(&type), 1);
str.append(reinterpret_cast<const char*>(&addr), sizeof(HostAddr));
return str;
}
HostAddr decodeAddPeer(const folly::StringPiece& log) {
HostAddr addr;
memcpy(&addr.first, log.begin() + 1, sizeof(addr.first));
memcpy(&addr.second, log.begin() + 1 + sizeof(addr.first), sizeof(addr.second));
return addr;
}
std::string encodeRemovePeer(const HostAddr& addr) {
std::string str;
CommandType type = CommandType::REMOVE_PEER;
str.append(reinterpret_cast<const char*>(&type), 1);
str.append(reinterpret_cast<const char*>(&addr), sizeof(HostAddr));
return str;
}
HostAddr decodeRemovePeer(const folly::StringPiece& log) {
HostAddr addr;
memcpy(&addr.first, log.begin() + 1, sizeof(addr.first));
memcpy(&addr.second, log.begin() + 1 + sizeof(addr.first), sizeof(addr.second));
return addr;
}
TestShard::TestShard(size_t idx,
std::shared_ptr<RaftexService> svc,
PartitionID partId,
HostAddr addr,
const folly::StringPiece walRoot,
std::shared_ptr<folly::IOThreadPoolExecutor> ioPool,
std::shared_ptr<thread::GenericThreadPool> workers,
std::shared_ptr<folly::Executor> handlersPool,
std::shared_ptr<SnapshotManager> snapshotMan,
std::function<void(size_t idx, const char*, TermID)>
leadershipLostCB,
std::function<void(size_t idx, const char*, TermID)>
becomeLeaderCB)
: RaftPart(1, // clusterId
1, // spaceId
partId,
addr,
walRoot,
ioPool,
workers,
handlersPool,
snapshotMan)
, idx_(idx)
, service_(svc)
, leadershipLostCB_(leadershipLostCB)
, becomeLeaderCB_(becomeLeaderCB) {
}
void TestShard::onLostLeadership(TermID term) {
if (leadershipLostCB_) {
leadershipLostCB_(idx_, idStr(), term);
}
}
void TestShard::onElected(TermID term) {
if (becomeLeaderCB_) {
becomeLeaderCB_(idx_, idStr(), term);
}
}
bool TestShard::commitLogs(std::unique_ptr<LogIterator> iter) {
LogID firstId = -1;
LogID lastId = -1;
int32_t commitLogsNum = 0;
while (iter->valid()) {
if (firstId < 0) {
firstId = iter->logId();
}
lastId = iter->logId();
auto log = iter->logMsg();
if (!log.empty()) {
switch (static_cast<CommandType>(log[0])) {
case CommandType::TRANSFER_LEADER: {
auto nLeader = decodeTransferLeader(log);
commitTransLeader(nLeader);
break;
}
case CommandType::REMOVE_PEER: {
auto peer = decodeRemovePeer(log);
commitRemovePeer(peer);
break;
}
case CommandType::ADD_PEER:
case CommandType::ADD_LEARNER: {
break;
}
default: {
folly::RWSpinLock::WriteHolder wh(&lock_);
currLogId_ = iter->logId();
data_.emplace_back(currLogId_, log.toString());
VLOG(1) << idStr_ << "Write: " << log << ", LogId: " << currLogId_
<< " state machine log size: " << data_.size();
break;
}
}
commitLogsNum++;
}
++(*iter);
}
VLOG(2) << "TestShard: " << idStr_ << "Committed log " << firstId << " to " << lastId;
if (lastId > -1) {
lastCommittedLogId_ = lastId;
}
if (commitLogsNum > 0) {
commitTimes_++;
}
return true;
}
std::pair<int64_t, int64_t> TestShard::commitSnapshot(const std::vector<std::string>& data,
LogID committedLogId,
TermID committedLogTerm,
bool finished) {
folly::RWSpinLock::WriteHolder wh(&lock_);
int64_t count = 0;
int64_t size = 0;
for (auto& row : data) {
count++;
size += row.size();
auto idData = decodeSnapshotRow(row);
VLOG(1) << idStr_ << "Commit row logId " << idData.first << ", log " << idData.second;
data_.emplace_back(idData.first, std::move(idData.second));
}
if (finished) {
lastCommittedLogId_ = committedLogId;
LOG(INFO) << idStr_ << "Commit the snapshot committedLogId " << committedLogId
<< ", term " << committedLogTerm;
}
return std::make_pair(count, size);
}
void TestShard::cleanup() {
folly::RWSpinLock::WriteHolder wh(&lock_);
data_.clear();
lastCommittedLogId_ = 0;
}
size_t TestShard::getNumLogs() const {
return data_.size();
}
bool TestShard::getLogMsg(size_t index, folly::StringPiece& msg) {
folly::RWSpinLock::ReadHolder rh(&lock_);
    if (index >= data_.size()) {
return false;
}
msg = data_[index].second;
return true;
}
} // namespace test
} // namespace raftex
} // namespace nebula
| 1 | 28,849 | using folly::to is better ? | vesoft-inc-nebula | cpp |
@@ -376,7 +376,6 @@ def _init_profiles():
private_profile.setter = ProfileSetter( # type: ignore[attr-defined]
private_profile)
assert private_profile.isOffTheRecord()
- private_profile.setter.init_profile()
def _init_site_specific_quirks(): | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Bridge from QWebEngineSettings to our own settings.
Module attributes:
ATTRIBUTES: A mapping from internal setting names to QWebEngineSetting enum
constants.
"""
import os
import operator
import typing
from PyQt5.QtGui import QFont
from PyQt5.QtWebEngineWidgets import (QWebEngineSettings, QWebEngineProfile,
QWebEnginePage)
from qutebrowser.browser.webengine import spell, webenginequtescheme
from qutebrowser.config import config, websettings
from qutebrowser.config.websettings import AttributeInfo as Attr
from qutebrowser.utils import (utils, standarddir, qtutils, message, log,
urlmatch, usertypes)
# The default QWebEngineProfile
default_profile = typing.cast(QWebEngineProfile, None)
# The QWebEngineProfile used for private (off-the-record) windows
private_profile = None # type: typing.Optional[QWebEngineProfile]
# The global WebEngineSettings object
global_settings = typing.cast('WebEngineSettings', None)
parsed_user_agent = None
class _SettingsWrapper:
"""Expose a QWebEngineSettings interface which acts on all profiles.
For read operations, the default profile value is always used.
"""
def __init__(self):
self._settings = [default_profile.settings()]
if private_profile:
self._settings.append(private_profile.settings())
def setAttribute(self, attribute, on):
for settings in self._settings:
settings.setAttribute(attribute, on)
def setFontFamily(self, which, family):
for settings in self._settings:
settings.setFontFamily(which, family)
def setFontSize(self, fonttype, size):
for settings in self._settings:
settings.setFontSize(fonttype, size)
def setDefaultTextEncoding(self, encoding):
for settings in self._settings:
settings.setDefaultTextEncoding(encoding)
def setUnknownUrlSchemePolicy(self, policy):
for settings in self._settings:
settings.setUnknownUrlSchemePolicy(policy)
def testAttribute(self, attribute):
return self._settings[0].testAttribute(attribute)
def fontSize(self, fonttype):
return self._settings[0].fontSize(fonttype)
def fontFamily(self, which):
return self._settings[0].fontFamily(which)
def defaultTextEncoding(self):
return self._settings[0].defaultTextEncoding()
def unknownUrlSchemePolicy(self):
return self._settings[0].unknownUrlSchemePolicy()
class WebEngineSettings(websettings.AbstractSettings):
"""A wrapper for the config for QWebEngineSettings."""
_ATTRIBUTES = {
'content.xss_auditing':
Attr(QWebEngineSettings.XSSAuditingEnabled),
'content.images':
Attr(QWebEngineSettings.AutoLoadImages),
'content.javascript.enabled':
Attr(QWebEngineSettings.JavascriptEnabled),
'content.javascript.can_open_tabs_automatically':
Attr(QWebEngineSettings.JavascriptCanOpenWindows),
'content.javascript.can_access_clipboard':
Attr(QWebEngineSettings.JavascriptCanAccessClipboard),
'content.plugins':
Attr(QWebEngineSettings.PluginsEnabled),
'content.hyperlink_auditing':
Attr(QWebEngineSettings.HyperlinkAuditingEnabled),
'content.local_content_can_access_remote_urls':
Attr(QWebEngineSettings.LocalContentCanAccessRemoteUrls),
'content.local_content_can_access_file_urls':
Attr(QWebEngineSettings.LocalContentCanAccessFileUrls),
'content.webgl':
Attr(QWebEngineSettings.WebGLEnabled),
'content.local_storage':
Attr(QWebEngineSettings.LocalStorageEnabled),
'content.desktop_capture':
Attr(QWebEngineSettings.ScreenCaptureEnabled,
converter=lambda val: True if val == 'ask' else val),
# 'ask' is handled via the permission system,
# or a hardcoded dialog on Qt < 5.10
'input.spatial_navigation':
Attr(QWebEngineSettings.SpatialNavigationEnabled),
'input.links_included_in_focus_chain':
Attr(QWebEngineSettings.LinksIncludedInFocusChain),
'scrolling.smooth':
Attr(QWebEngineSettings.ScrollAnimatorEnabled),
}
_FONT_SIZES = {
'fonts.web.size.minimum':
QWebEngineSettings.MinimumFontSize,
'fonts.web.size.minimum_logical':
QWebEngineSettings.MinimumLogicalFontSize,
'fonts.web.size.default':
QWebEngineSettings.DefaultFontSize,
'fonts.web.size.default_fixed':
QWebEngineSettings.DefaultFixedFontSize,
}
_FONT_FAMILIES = {
'fonts.web.family.standard': QWebEngineSettings.StandardFont,
'fonts.web.family.fixed': QWebEngineSettings.FixedFont,
'fonts.web.family.serif': QWebEngineSettings.SerifFont,
'fonts.web.family.sans_serif': QWebEngineSettings.SansSerifFont,
'fonts.web.family.cursive': QWebEngineSettings.CursiveFont,
'fonts.web.family.fantasy': QWebEngineSettings.FantasyFont,
}
# Only Qt >= 5.11 support UnknownUrlSchemePolicy
try:
_UNKNOWN_URL_SCHEME_POLICY = {
'disallow':
QWebEngineSettings.DisallowUnknownUrlSchemes,
'allow-from-user-interaction':
QWebEngineSettings.AllowUnknownUrlSchemesFromUserInteraction,
'allow-all':
QWebEngineSettings.AllowAllUnknownUrlSchemes,
}
except AttributeError:
_UNKNOWN_URL_SCHEME_POLICY = None
# Mapping from WebEngineSettings::initDefaults in
# qtwebengine/src/core/web_engine_settings.cpp
_FONT_TO_QFONT = {
QWebEngineSettings.StandardFont: QFont.Serif,
QWebEngineSettings.FixedFont: QFont.Monospace,
QWebEngineSettings.SerifFont: QFont.Serif,
QWebEngineSettings.SansSerifFont: QFont.SansSerif,
QWebEngineSettings.CursiveFont: QFont.Cursive,
QWebEngineSettings.FantasyFont: QFont.Fantasy,
}
def set_unknown_url_scheme_policy(
self, policy: typing.Union[str, usertypes.Unset]) -> bool:
"""Set the UnknownUrlSchemePolicy to use.
Return:
True if there was a change, False otherwise.
"""
old_value = self._settings.unknownUrlSchemePolicy()
if isinstance(policy, usertypes.Unset):
self._settings.resetUnknownUrlSchemePolicy()
new_value = self._settings.unknownUrlSchemePolicy()
else:
new_value = self._UNKNOWN_URL_SCHEME_POLICY[policy]
self._settings.setUnknownUrlSchemePolicy(new_value)
return old_value != new_value
def _update_setting(self, setting, value):
if setting == 'content.unknown_url_scheme_policy':
if self._UNKNOWN_URL_SCHEME_POLICY:
return self.set_unknown_url_scheme_policy(value)
return False
return super()._update_setting(setting, value)
def init_settings(self):
super().init_settings()
self.update_setting('content.unknown_url_scheme_policy')
def __init__(self, settings):
super().__init__(settings)
# Attributes which don't exist in all Qt versions.
new_attributes = {
# Qt 5.8
'content.print_element_backgrounds':
('PrintElementBackgrounds', None),
# Qt 5.11
'content.autoplay':
('PlaybackRequiresUserGesture', operator.not_),
# Qt 5.12
'content.dns_prefetch':
('DnsPrefetchEnabled', None),
}
for name, (attribute, converter) in new_attributes.items():
try:
value = getattr(QWebEngineSettings, attribute)
except AttributeError:
continue
self._ATTRIBUTES[name] = Attr(value, converter=converter)
class ProfileSetter:
"""Helper to set various settings on a profile."""
def __init__(self, profile):
self._profile = profile
def init_profile(self):
"""Initialize settings on the given profile."""
self.set_http_headers()
self.set_http_cache_size()
self._set_hardcoded_settings()
if qtutils.version_check('5.8'):
self.set_dictionary_language()
def _set_hardcoded_settings(self):
"""Set up settings with a fixed value."""
settings = self._profile.settings()
settings.setAttribute(
QWebEngineSettings.FullScreenSupportEnabled, True)
try:
settings.setAttribute(
QWebEngineSettings.FocusOnNavigationEnabled, False)
except AttributeError:
# Added in Qt 5.8
pass
try:
settings.setAttribute(QWebEngineSettings.PdfViewerEnabled, False)
except AttributeError:
# Added in Qt 5.13
pass
def set_http_headers(self):
"""Set the user agent and accept-language for the given profile.
We override those per request in the URL interceptor (to allow for
per-domain values), but this one still gets used for things like
window.navigator.userAgent/.languages in JS.
"""
user_agent = websettings.user_agent()
self._profile.setHttpUserAgent(user_agent)
accept_language = config.val.content.headers.accept_language
if accept_language is not None:
self._profile.setHttpAcceptLanguage(accept_language)
def set_http_cache_size(self):
"""Initialize the HTTP cache size for the given profile."""
size = config.val.content.cache.size
if size is None:
size = 0
else:
size = qtutils.check_overflow(size, 'int', fatal=False)
# 0: automatically managed by QtWebEngine
self._profile.setHttpCacheMaximumSize(size)
def set_persistent_cookie_policy(self):
"""Set the HTTP Cookie size for the given profile."""
assert not self._profile.isOffTheRecord()
if config.val.content.cookies.store:
value = QWebEngineProfile.AllowPersistentCookies
else:
value = QWebEngineProfile.NoPersistentCookies
self._profile.setPersistentCookiesPolicy(value)
def set_dictionary_language(self, warn=True):
"""Load the given dictionaries."""
filenames = []
for code in config.val.spellcheck.languages or []:
local_filename = spell.local_filename(code)
if not local_filename:
if warn:
message.warning("Language {} is not installed - see "
"scripts/dictcli.py in qutebrowser's "
"sources".format(code))
continue
filenames.append(os.path.splitext(local_filename)[0])
log.config.debug("Found dicts: {}".format(filenames))
self._profile.setSpellCheckLanguages(filenames)
self._profile.setSpellCheckEnabled(bool(filenames))
def _update_settings(option):
"""Update global settings when qwebsettings changed."""
global_settings.update_setting(option)
if option in ['content.headers.user_agent',
'content.headers.accept_language']:
default_profile.setter.set_http_headers()
if private_profile:
private_profile.setter.set_http_headers()
elif option == 'content.cache.size':
default_profile.setter.set_http_cache_size()
if private_profile:
private_profile.setter.set_http_cache_size()
elif (option == 'content.cookies.store' and
# https://bugreports.qt.io/browse/QTBUG-58650
qtutils.version_check('5.9', compiled=False)):
default_profile.setter.set_persistent_cookie_policy()
# We're not touching the private profile's cookie policy.
elif option == 'spellcheck.languages':
default_profile.setter.set_dictionary_language()
if private_profile:
private_profile.setter.set_dictionary_language(warn=False)
def _init_user_agent_str(ua):
global parsed_user_agent
parsed_user_agent = websettings.UserAgent.parse(ua)
def init_user_agent():
_init_user_agent_str(QWebEngineProfile.defaultProfile().httpUserAgent())
def _init_profiles():
"""Init the two used QWebEngineProfiles."""
global default_profile, private_profile
default_profile = QWebEngineProfile.defaultProfile()
init_user_agent()
default_profile.setter = ProfileSetter( # type: ignore[attr-defined]
default_profile)
default_profile.setCachePath(
os.path.join(standarddir.cache(), 'webengine'))
default_profile.setPersistentStoragePath(
os.path.join(standarddir.data(), 'webengine'))
default_profile.setter.init_profile()
default_profile.setter.set_persistent_cookie_policy()
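    # the separate off-the-record profile is only created when QtWebEngine is not
    # running in single-process mode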
if not qtutils.is_single_process():
private_profile = QWebEngineProfile()
private_profile.setter = ProfileSetter( # type: ignore[attr-defined]
private_profile)
assert private_profile.isOffTheRecord()
private_profile.setter.init_profile()
def _init_site_specific_quirks():
if not config.val.content.site_specific_quirks:
return
# default_ua = ("Mozilla/5.0 ({os_info}) "
# "AppleWebKit/{webkit_version} (KHTML, like Gecko) "
# "{qt_key}/{qt_version} "
# "{upstream_browser_key}/{upstream_browser_version} "
# "Safari/{webkit_version}")
no_qtwe_ua = ("Mozilla/5.0 ({os_info}) "
"AppleWebKit/{webkit_version} (KHTML, like Gecko) "
"{upstream_browser_key}/{upstream_browser_version} "
"Safari/{webkit_version}")
firefox_ua = "Mozilla/5.0 ({os_info}; rv:71.0) Gecko/20100101 Firefox/71.0"
new_chrome_ua = ("Mozilla/5.0 ({os_info}) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/99 "
"Safari/537.36")
user_agents = {
'https://web.whatsapp.com/': no_qtwe_ua,
'https://accounts.google.com/*': firefox_ua,
'https://*.slack.com/*': new_chrome_ua,
'https://docs.google.com/*': firefox_ua,
}
if not qtutils.version_check('5.9'):
user_agents['https://www.dell.com/support/*'] = new_chrome_ua
for pattern, ua in user_agents.items():
config.instance.set_obj('content.headers.user_agent', ua,
pattern=urlmatch.UrlPattern(pattern),
hide_userconfig=True)
def _init_devtools_settings():
"""Make sure the devtools always get images/JS permissions."""
settings = [
('content.javascript.enabled', True),
('content.images', True)
] # type: typing.List[typing.Tuple[str, typing.Any]]
if qtutils.version_check('5.11'):
settings.append(('content.cookies.accept', 'all'))
for setting, value in settings:
for pattern in ['chrome-devtools://*', 'devtools://*']:
config.instance.set_obj(setting, value,
pattern=urlmatch.UrlPattern(pattern),
hide_userconfig=True)
def init(args):
"""Initialize the global QWebSettings."""
if (args.enable_webengine_inspector and
not hasattr(QWebEnginePage, 'setInspectedPage')): # only Qt < 5.11
os.environ['QTWEBENGINE_REMOTE_DEBUGGING'] = str(utils.random_port())
webenginequtescheme.init()
spell.init()
_init_profiles()
config.instance.changed.connect(_update_settings)
global global_settings
global_settings = WebEngineSettings(_SettingsWrapper())
global_settings.init_settings()
_init_site_specific_quirks()
_init_devtools_settings()
def shutdown():
pass
| 1 | 24,797 | I'm guessing this is unintended? | qutebrowser-qutebrowser | py |
@@ -284,11 +284,13 @@ class ChangeStreamCursor extends Cursor {
if (this.options[optionName]) result[optionName] = this.options[optionName];
}
+ const resumeKey = this.options.startAfter && !this.hasReceived ? 'startAfter' : 'resumeAfter';
+
if (this.resumeToken || this.startAtOperationTime) {
['resumeAfter', 'startAfter', 'startAtOperationTime'].forEach(key => delete result[key]);
if (this.resumeToken) {
- result.resumeAfter = this.resumeToken;
+ result[resumeKey] = this.resumeToken;
} else if (this.startAtOperationTime && maxWireVersion(this.server) >= 7) {
result.startAtOperationTime = this.startAtOperationTime;
} | 1 | 'use strict';
const EventEmitter = require('events');
const isResumableError = require('./error').isResumableError;
const MongoError = require('./core').MongoError;
const Cursor = require('./cursor');
const relayEvents = require('./core/utils').relayEvents;
const maxWireVersion = require('./core/utils').maxWireVersion;
const AggregateOperation = require('./operations/aggregate');
const CHANGE_STREAM_OPTIONS = ['resumeAfter', 'startAfter', 'startAtOperationTime', 'fullDocument'];
const CURSOR_OPTIONS = ['batchSize', 'maxAwaitTimeMS', 'collation', 'readPreference'].concat(
CHANGE_STREAM_OPTIONS
);
const CHANGE_DOMAIN_TYPES = {
COLLECTION: Symbol('Collection'),
DATABASE: Symbol('Database'),
CLUSTER: Symbol('Cluster')
};
/**
* @typedef ResumeToken
* @description Represents the logical starting point for a new or resuming {@link ChangeStream} on the server.
* @see https://docs.mongodb.com/master/changeStreams/#change-stream-resume-token
*/
/**
* @typedef OperationTime
* @description Represents a specific point in time on a server. Can be retrieved by using {@link Db#command}
* @see https://docs.mongodb.com/manual/reference/method/db.runCommand/#response
*/
/**
* @typedef ChangeStreamOptions
* @description Options that can be passed to a ChangeStream. Note that startAfter, resumeAfter, and startAtOperationTime are all mutually exclusive, and the server will error if more than one is specified.
* @property {string} [fullDocument='default'] Allowed values: ‘default’, ‘updateLookup’. When set to ‘updateLookup’, the change stream will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred.
* @property {number} [maxAwaitTimeMS] The maximum amount of time for the server to wait on new documents to satisfy a change stream query.
* @property {ResumeToken} [resumeAfter] Allows you to start a changeStream after a specified event. See {@link https://docs.mongodb.com/master/changeStreams/#resumeafter-for-change-streams|ChangeStream documentation}.
* @property {ResumeToken} [startAfter] Similar to resumeAfter, but will allow you to start after an invalidated event. See {@link https://docs.mongodb.com/master/changeStreams/#startafter-for-change-streams|ChangeStream documentation}.
* @property {OperationTime} [startAtOperationTime] Will start the changeStream after the specified operationTime.
* @property {number} [batchSize=1000] The number of documents to return per batch. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @property {object} [collation] Specify collation settings for operation. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @property {ReadPreference} [readPreference] The read preference. Defaults to the read preference of the database or collection. See {@link https://docs.mongodb.com/manual/reference/read-preference|read preference documentation}.
*/
/**
* Creates a new Change Stream instance. Normally created using {@link Collection#watch|Collection.watch()}.
* @class ChangeStream
* @since 3.0.0
* @param {(MongoClient|Db|Collection)} parent The parent object that created this change stream
* @param {Array} pipeline An array of {@link https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents
* @param {ChangeStreamOptions} [options] Optional settings
* @fires ChangeStream#close
* @fires ChangeStream#change
* @fires ChangeStream#end
* @fires ChangeStream#error
* @fires ChangeStream#resumeTokenChanged
* @return {ChangeStream} a ChangeStream instance.
*/
class ChangeStream extends EventEmitter {
constructor(parent, pipeline, options) {
super();
const Collection = require('./collection');
const Db = require('./db');
const MongoClient = require('./mongo_client');
this.pipeline = pipeline || [];
this.options = options || {};
this.parent = parent;
this.namespace = parent.s.namespace;
if (parent instanceof Collection) {
this.type = CHANGE_DOMAIN_TYPES.COLLECTION;
this.topology = parent.s.db.serverConfig;
} else if (parent instanceof Db) {
this.type = CHANGE_DOMAIN_TYPES.DATABASE;
this.topology = parent.serverConfig;
} else if (parent instanceof MongoClient) {
this.type = CHANGE_DOMAIN_TYPES.CLUSTER;
this.topology = parent.topology;
} else {
throw new TypeError(
'parent provided to ChangeStream constructor is not an instance of Collection, Db, or MongoClient'
);
}
this.promiseLibrary = parent.s.promiseLibrary;
if (!this.options.readPreference && parent.s.readPreference) {
this.options.readPreference = parent.s.readPreference;
}
// Create contained Change Stream cursor
this.cursor = createChangeStreamCursor(this, options);
// Listen for any `change` listeners being added to ChangeStream
this.on('newListener', eventName => {
if (eventName === 'change' && this.cursor && this.listenerCount('change') === 0) {
this.cursor.on('data', change =>
processNewChange({ changeStream: this, change, eventEmitter: true })
);
}
});
// Listen for all `change` listeners being removed from ChangeStream
this.on('removeListener', eventName => {
if (eventName === 'change' && this.listenerCount('change') === 0 && this.cursor) {
this.cursor.removeAllListeners('data');
}
});
}
/**
* @property {ResumeToken} resumeToken
* The cached resume token that will be used to resume
* after the most recently returned change.
*/
get resumeToken() {
return this.cursor.resumeToken;
}
/**
* Check if there is any document still available in the Change Stream
* @function ChangeStream.prototype.hasNext
* @param {ChangeStream~resultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
hasNext(callback) {
return this.cursor.hasNext(callback);
}
/**
* Get the next available document from the Change Stream, returns null if no more documents are available.
* @function ChangeStream.prototype.next
* @param {ChangeStream~resultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
next(callback) {
var self = this;
if (this.isClosed()) {
if (callback) return callback(new Error('Change Stream is not open.'), null);
return self.promiseLibrary.reject(new Error('Change Stream is not open.'));
}
return this.cursor
.next()
.then(change => processNewChange({ changeStream: self, change, callback }))
.catch(error => processNewChange({ changeStream: self, error, callback }));
}
/**
* Is the cursor closed
* @method ChangeStream.prototype.isClosed
* @return {boolean}
*/
isClosed() {
if (this.cursor) {
return this.cursor.isClosed();
}
return true;
}
/**
* Close the Change Stream
* @method ChangeStream.prototype.close
* @param {ChangeStream~resultCallback} [callback] The result callback.
* @return {Promise} returns Promise if no callback passed
*/
close(callback) {
if (!this.cursor) {
if (callback) return callback();
return this.promiseLibrary.resolve();
}
// Tidy up the existing cursor
const cursor = this.cursor;
if (callback) {
return cursor.close(err => {
['data', 'close', 'end', 'error'].forEach(event => cursor.removeAllListeners(event));
delete this.cursor;
return callback(err);
});
}
const PromiseCtor = this.promiseLibrary || Promise;
return new PromiseCtor((resolve, reject) => {
cursor.close(err => {
['data', 'close', 'end', 'error'].forEach(event => cursor.removeAllListeners(event));
delete this.cursor;
if (err) return reject(err);
resolve();
});
});
}
/**
* This method pulls all the data out of a readable stream, and writes it to the supplied destination, automatically managing the flow so that the destination is not overwhelmed by a fast readable stream.
* @method
* @param {Writable} destination The destination for writing data
* @param {object} [options] {@link https://nodejs.org/api/stream.html#stream_readable_pipe_destination_options|Pipe options}
* @return {null}
*/
pipe(destination, options) {
if (!this.pipeDestinations) {
this.pipeDestinations = [];
}
this.pipeDestinations.push(destination);
return this.cursor.pipe(destination, options);
}
/**
* This method will remove the hooks set up for a previous pipe() call.
* @param {Writable} [destination] The destination for writing data
* @return {null}
*/
unpipe(destination) {
if (this.pipeDestinations && this.pipeDestinations.indexOf(destination) > -1) {
this.pipeDestinations.splice(this.pipeDestinations.indexOf(destination), 1);
}
return this.cursor.unpipe(destination);
}
/**
* Return a modified Readable stream including a possible transform method.
* @method
* @param {object} [options] Optional settings.
* @param {function} [options.transform] A transformation method applied to each document emitted by the stream.
* @return {Cursor}
*/
stream(options) {
this.streamOptions = options;
return this.cursor.stream(options);
}
/**
* This method will cause a stream in flowing mode to stop emitting data events. Any data that becomes available will remain in the internal buffer.
* @return {null}
*/
pause() {
return this.cursor.pause();
}
/**
* This method will cause the readable stream to resume emitting data events.
* @return {null}
*/
resume() {
return this.cursor.resume();
}
}
class ChangeStreamCursor extends Cursor {
constructor(topology, operation, options) {
super(topology, operation, options);
options = options || {};
this._resumeToken = null;
this.startAtOperationTime = options.startAtOperationTime;
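    // seed the resume token from the user-provided startAfter/resumeAfter option, if any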
if (options.startAfter) {
this.resumeToken = options.startAfter;
} else if (options.resumeAfter) {
this.resumeToken = options.resumeAfter;
}
}
set resumeToken(token) {
this._resumeToken = token;
this.emit('resumeTokenChanged', token);
}
get resumeToken() {
return this._resumeToken;
}
get resumeOptions() {
const result = {};
for (const optionName of CURSOR_OPTIONS) {
if (this.options[optionName]) result[optionName] = this.options[optionName];
}
if (this.resumeToken || this.startAtOperationTime) {
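      // only one of resumeAfter/startAfter/startAtOperationTime may be sent when resuming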
['resumeAfter', 'startAfter', 'startAtOperationTime'].forEach(key => delete result[key]);
if (this.resumeToken) {
result.resumeAfter = this.resumeToken;
} else if (this.startAtOperationTime && maxWireVersion(this.server) >= 7) {
result.startAtOperationTime = this.startAtOperationTime;
}
}
return result;
}
_initializeCursor(callback) {
super._initializeCursor((err, result) => {
if (err) {
callback(err, null);
return;
}
const response = result.documents[0];
if (
this.startAtOperationTime == null &&
this.resumeAfter == null &&
this.startAfter == null &&
maxWireVersion(this.server) >= 7
) {
this.startAtOperationTime = response.operationTime;
}
const cursor = response.cursor;
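      // an empty first batch means the postBatchResumeToken is the only safe resume point,
      // so cache it as the current resume token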
if (cursor.postBatchResumeToken) {
this.cursorState.postBatchResumeToken = cursor.postBatchResumeToken;
if (cursor.firstBatch.length === 0) {
this.resumeToken = cursor.postBatchResumeToken;
}
}
this.emit('response');
callback(err, result);
});
}
_getMore(callback) {
super._getMore((err, response) => {
if (err) {
callback(err, null);
return;
}
const cursor = response.cursor;
if (cursor.postBatchResumeToken) {
this.cursorState.postBatchResumeToken = cursor.postBatchResumeToken;
if (cursor.nextBatch.length === 0) {
this.resumeToken = cursor.postBatchResumeToken;
}
}
this.emit('response');
callback(err, response);
});
}
}
/**
* @event ChangeStreamCursor#response
* internal event DO NOT USE
* @ignore
*/
// Create a new change stream cursor based on self's configuration
function createChangeStreamCursor(self, options) {
const changeStreamStageOptions = { fullDocument: options.fullDocument || 'default' };
applyKnownOptions(changeStreamStageOptions, options, CHANGE_STREAM_OPTIONS);
if (self.type === CHANGE_DOMAIN_TYPES.CLUSTER) {
changeStreamStageOptions.allChangesForCluster = true;
}
const pipeline = [{ $changeStream: changeStreamStageOptions }].concat(self.pipeline);
const cursorOptions = applyKnownOptions({}, options, CURSOR_OPTIONS);
const changeStreamCursor = new ChangeStreamCursor(
self.topology,
new AggregateOperation(self.parent, pipeline, options),
cursorOptions
);
relayEvents(changeStreamCursor, self, ['resumeTokenChanged', 'end', 'close']);
/**
* Fired for each new matching change in the specified namespace. Attaching a `change`
* event listener to a Change Stream will switch the stream into flowing mode. Data will
* then be passed as soon as it is available.
*
* @event ChangeStream#change
* @type {object}
*/
if (self.listenerCount('change') > 0) {
changeStreamCursor.on('data', function(change) {
processNewChange({ changeStream: self, change, eventEmitter: true });
});
}
/**
* Change stream close event
*
* @event ChangeStream#close
* @type {null}
*/
/**
* Change stream end event
*
* @event ChangeStream#end
* @type {null}
*/
/**
* Emitted each time the change stream stores a new resume token.
*
* @event ChangeStream#resumeTokenChanged
* @type {ResumeToken}
*/
/**
* Fired when the stream encounters an error.
*
* @event ChangeStream#error
* @type {Error}
*/
changeStreamCursor.on('error', function(error) {
processNewChange({ changeStream: self, error, eventEmitter: true });
});
if (self.pipeDestinations) {
const cursorStream = changeStreamCursor.stream(self.streamOptions);
for (let pipeDestination in self.pipeDestinations) {
cursorStream.pipe(pipeDestination);
}
}
return changeStreamCursor;
}
function applyKnownOptions(target, source, optionNames) {
optionNames.forEach(name => {
if (source[name]) {
target[name] = source[name];
}
});
return target;
}
// This method performs a basic server selection loop, satisfying the requirements of
// ChangeStream resumability until the new SDAM layer can be used.
const SELECTION_TIMEOUT = 30000;
function waitForTopologyConnected(topology, options, callback) {
setTimeout(() => {
if (options && options.start == null) options.start = process.hrtime();
const start = options.start || process.hrtime();
const timeout = options.timeout || SELECTION_TIMEOUT;
const readPreference = options.readPreference;
if (topology.isConnected({ readPreference })) return callback(null, null);
const hrElapsed = process.hrtime(start);
const elapsed = (hrElapsed[0] * 1e9 + hrElapsed[1]) / 1e6;
if (elapsed > timeout) return callback(new MongoError('Timed out waiting for connection'));
waitForTopologyConnected(topology, options, callback);
}, 3000); // this is an arbitrary wait time to allow SDAM to transition
}
// Handle new change events. This method brings together the routes from the callback, event emitter, and promise ways of using ChangeStream.
function processNewChange(args) {
const changeStream = args.changeStream;
const error = args.error;
const change = args.change;
const callback = args.callback;
const eventEmitter = args.eventEmitter || false;
// If the changeStream is closed, then it should not process a change.
if (changeStream.isClosed()) {
// We do not error in the eventEmitter case.
if (eventEmitter) {
return;
}
const error = new MongoError('ChangeStream is closed');
return typeof callback === 'function'
? callback(error, null)
: changeStream.promiseLibrary.reject(error);
}
const cursor = changeStream.cursor;
const topology = changeStream.topology;
const options = changeStream.cursor.options;
if (error) {
if (isResumableError(error) && !changeStream.attemptingResume) {
changeStream.attemptingResume = true;
// stop listening to all events from old cursor
['data', 'close', 'end', 'error'].forEach(event =>
changeStream.cursor.removeAllListeners(event)
);
// close internal cursor, ignore errors
changeStream.cursor.close();
// attempt recreating the cursor
if (eventEmitter) {
waitForTopologyConnected(topology, { readPreference: options.readPreference }, err => {
if (err) {
changeStream.emit('error', err);
changeStream.emit('close');
return;
}
changeStream.cursor = createChangeStreamCursor(changeStream, cursor.resumeOptions);
});
return;
}
if (callback) {
waitForTopologyConnected(topology, { readPreference: options.readPreference }, err => {
if (err) return callback(err, null);
changeStream.cursor = createChangeStreamCursor(changeStream, cursor.resumeOptions);
changeStream.next(callback);
});
return;
}
return new Promise((resolve, reject) => {
waitForTopologyConnected(topology, { readPreference: options.readPreference }, err => {
if (err) return reject(err);
resolve();
});
})
.then(
() => (changeStream.cursor = createChangeStreamCursor(changeStream, cursor.resumeOptions))
)
.then(() => changeStream.next());
}
if (eventEmitter) return changeStream.emit('error', error);
if (typeof callback === 'function') return callback(error, null);
return changeStream.promiseLibrary.reject(error);
}
changeStream.attemptingResume = false;
if (change && !change._id) {
const noResumeTokenError = new Error(
'A change stream document has been received that lacks a resume token (_id).'
);
if (eventEmitter) return changeStream.emit('error', noResumeTokenError);
if (typeof callback === 'function') return callback(noResumeTokenError, null);
return changeStream.promiseLibrary.reject(noResumeTokenError);
}
// cache the resume token
if (cursor.bufferedCount() === 0 && cursor.cursorState.postBatchResumeToken) {
cursor.resumeToken = cursor.cursorState.postBatchResumeToken;
} else {
cursor.resumeToken = change._id;
}
// wipe the startAtOperationTime if there was one so that there won't be a conflict
// between resumeToken and startAtOperationTime if we need to reconnect the cursor
changeStream.options.startAtOperationTime = undefined;
// Return the change
if (eventEmitter) return changeStream.emit('change', change);
if (typeof callback === 'function') return callback(error, change);
return changeStream.promiseLibrary.resolve(change);
}
/**
* The callback format for results
* @callback ChangeStream~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {(object|null)} result The result object if the command was executed successfully.
*/
module.exports = ChangeStream;
| 1 | 17,141 | This looks a little suspicious to me, can you explain what's going on here? | mongodb-node-mongodb-native | js |
@@ -48,7 +48,15 @@ class ApplicationController < ActionController::Base
private
def current_user
- @current_user ||= User.find_or_create_by(email_address: session[:user]['email']) if session[:user] && session[:user]['email']
+ @current_user ||= find_current_user
+ end
+
+ def find_current_user
+ if ENV['FORCE_USER_ID']
+ User.find ENV['FORCE_USER_ID']
+ else
+ User.find_or_create_by(email_address: session[:user]['email']) if session[:user] && session[:user]['email']
+ end
end
def sign_in(user) | 1 | class ApplicationController < ActionController::Base
include Pundit # For authorization checks
include ReturnToHelper
include MarkdownHelper
helper ValueHelper
add_template_helper ClientHelper
protect_from_forgery with: :exception
helper_method :current_user, :signed_in?, :return_to
before_action :disable_peek_by_default
protected
# We are overriding this method to account for ExceptionPolicies
def authorize(record, query=nil, user=nil)
user ||= @current_user
policy = ::PolicyFinder.policy_for(user, record)
# use the action as a default permission
query ||= ("can_" + params[:action].to_s + "!").to_sym
unless policy.public_send(query)
# the method might raise its own exception, or it might return a
# boolean. Both systems are accommodated
# will need to replace this when a new version of pundit arrives
      ex = NotAuthorizedError.new("not allowed to #{query} this #{record}")
      ex.query, ex.record, ex.policy = query, record, policy
raise ex
end
end
# Override Pundit to account for proposal gymnastics
def policy(record)
obj = ::PolicyFinder.authorizing_object(record)
super(obj)
end
def admin?
signed_in? && current_user.admin?
end
def peek_enabled?
Rails.env.development? || self.admin?
end
private
def current_user
@current_user ||= User.find_or_create_by(email_address: session[:user]['email']) if session[:user] && session[:user]['email']
end
def sign_in(user)
session[:user] ||= {}
session[:user]['email'] = user.email_address
@current_user = user
end
def sign_out
reset_session
@current_user = nil
end
def signed_in?
!!current_user
end
def authenticate_user!
unless signed_in?
flash[:error] = 'You need to sign in for access to this page.'
redirect_to root_url(return_to: self.make_return_to("Previous", request.fullpath))
end
end
def disable_peek_by_default
if cookies[:peek].nil?
cookies[:peek] = false
end
end
end
| 1 | 14,047 | Can you talk about this? I'm not sure I follow why this is necessary. | 18F-C2 | rb |
@@ -136,6 +136,8 @@ class AbstractBase extends AbstractActionController
->fromPost('layout', $this->params()->fromQuery('layout', false));
if ('lightbox' === $layout) {
$this->layout()->setTemplate('layout/lightbox');
+        } elseif (isset($params['layout']) && $params['layout'] == 'simple') {
+ $this->layout()->setTemplate('layout/simple');
}
return new ViewModel($params);
} | 1 | <?php
/**
* VuFind controller base class (defines some methods that can be shared by other
* controllers).
*
* PHP version 7
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Controller
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:controllers Wiki
*/
namespace VuFind\Controller;
use VuFind\Exception\ILS as ILSException;
use Zend\Mvc\Controller\AbstractActionController;
use Zend\Mvc\MvcEvent;
use Zend\ServiceManager\ServiceLocatorInterface;
use Zend\View\Model\ViewModel;
use ZfcRbac\Service\AuthorizationServiceAwareInterface;
/**
* VuFind controller base class (defines some methods that can be shared by other
* controllers).
*
* @category VuFind
* @package Controller
* @author Chris Hallberg <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:controllers Wiki
*
* @SuppressWarnings(PHPMD.NumberOfChildren)
*/
class AbstractBase extends AbstractActionController
{
/**
* Permission that must be granted to access this module (false for no
* restriction)
*
* @var string|bool
*/
protected $accessPermission = false;
/**
* Behavior when access is denied (used unless overridden through
* permissionBehavior.ini). Valid values are 'promptLogin' and 'exception'.
* Leave at null to use the defaultDeniedControllerBehavior set in
* permissionBehavior.ini (normally 'promptLogin' unless changed).
*
* @var string
*/
protected $accessDeniedBehavior = null;
/**
* Service manager
*
* @var ServiceLocatorInterface
*/
protected $serviceLocator;
/**
* Constructor
*
* @param ServiceLocatorInterface $sm Service locator
*/
public function __construct(ServiceLocatorInterface $sm)
{
$this->serviceLocator = $sm;
}
/**
* Use preDispatch event to block access when appropriate.
*
* @param MvcEvent $e Event object
*
* @return void
*/
public function validateAccessPermission(MvcEvent $e)
{
// If there is an access permission set for this controller, pass it
// through the permission helper, and if the helper returns a custom
// response, use that instead of the normal behavior.
if ($this->accessPermission) {
$response = $this->permission()
->check($this->accessPermission, $this->accessDeniedBehavior);
if (is_object($response)) {
$e->setResponse($response);
}
}
}
/**
* Register the default events for this controller
*
* @return void
*/
protected function attachDefaultListeners()
{
parent::attachDefaultListeners();
// Attach preDispatch event if we need to check permissions.
if ($this->accessPermission) {
$events = $this->getEventManager();
$events->attach(
MvcEvent::EVENT_DISPATCH, [$this, 'validateAccessPermission'], 1000
);
}
}
/**
* Create a new ViewModel.
*
* @param array $params Parameters to pass to ViewModel constructor.
*
* @return ViewModel
*/
protected function createViewModel($params = null)
{
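        // A layout=lightbox parameter (from POST or GET) switches the page to the minimal
        // lightbox layout.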
$layout = $this->params()
->fromPost('layout', $this->params()->fromQuery('layout', false));
if ('lightbox' === $layout) {
$this->layout()->setTemplate('layout/lightbox');
}
return new ViewModel($params);
}
/**
* Create a new ViewModel to use as an email form.
*
* @param array $params Parameters to pass to ViewModel constructor.
* @param string $defaultSubject Default subject line to use.
*
* @return ViewModel
*/
protected function createEmailViewModel($params = null, $defaultSubject = null)
{
// Build view:
$view = $this->createViewModel($params);
// Load configuration and current user for convenience:
$config = $this->getConfig();
$view->disableFrom
= (isset($config->Mail->disable_from) && $config->Mail->disable_from);
$view->editableSubject = isset($config->Mail->user_editable_subjects)
&& $config->Mail->user_editable_subjects;
$view->maxRecipients = isset($config->Mail->maximum_recipients)
? intval($config->Mail->maximum_recipients) : 1;
$user = $this->getUser();
// Send parameters back to view so form can be re-populated:
if ($this->getRequest()->isPost()) {
$view->to = $this->params()->fromPost('to');
if (!$view->disableFrom) {
$view->from = $this->params()->fromPost('from');
}
if ($view->editableSubject) {
$view->subject = $this->params()->fromPost('subject');
}
$view->message = $this->params()->fromPost('message');
}
// Set default values if applicable:
if ((!isset($view->to) || empty($view->to)) && $user
&& isset($config->Mail->user_email_in_to)
&& $config->Mail->user_email_in_to
) {
$view->to = $user->email;
}
if (!isset($view->from) || empty($view->from)) {
if ($user && isset($config->Mail->user_email_in_from)
&& $config->Mail->user_email_in_from
) {
$view->userEmailInFrom = true;
$view->from = $user->email;
} elseif (isset($config->Mail->default_from)
&& $config->Mail->default_from
) {
$view->from = $config->Mail->default_from;
}
}
if (!isset($view->subject) || empty($view->subject)) {
$view->subject = $defaultSubject;
}
// Fail if we're missing a from and the form element is disabled:
if ($view->disableFrom) {
if (empty($view->from)) {
$view->from = $config->Site->email;
}
if (empty($view->from)) {
throw new \Exception('Unable to determine email from address');
}
}
return $view;
}
/**
* Get the account manager object.
*
* @return \VuFind\Auth\Manager
*/
protected function getAuthManager()
{
return $this->serviceLocator->get('VuFind\Auth\Manager');
}
/**
* Get the authorization service (note that we're doing this on-demand
* rather than through injection with the AuthorizationServiceAwareInterface
* to minimize expensive initialization when authorization is not needed.
*
* @return \ZfcRbac\Service\AuthorizationService
*/
protected function getAuthorizationService()
{
return $this->serviceLocator
->get('ZfcRbac\Service\AuthorizationService');
}
/**
* Get the ILS authenticator.
*
* @return \VuFind\Auth\ILSAuthenticator
*/
protected function getILSAuthenticator()
{
return $this->serviceLocator->get('VuFind\Auth\ILSAuthenticator');
}
/**
* Get the user object if logged in, false otherwise.
*
* @return object|bool
*/
protected function getUser()
{
return $this->getAuthManager()->isLoggedIn();
}
/**
* Get the view renderer
*
* @return \Zend\View\Renderer\RendererInterface
*/
protected function getViewRenderer()
{
return $this->serviceLocator->get('ViewRenderer');
}
/**
* Redirect the user to the login screen.
*
* @param string $msg Flash message to display on login screen
* @param array $extras Associative array of extra fields to store
* @param bool $forward True to forward, false to redirect
*
* @return mixed
*/
public function forceLogin($msg = null, $extras = [], $forward = true)
{
// Set default message if necessary.
if (null === $msg) {
$msg = 'You must be logged in first';
}
// We don't want to return to the lightbox
$serverUrl = $this->getServerUrl();
$serverUrl = str_replace(
['?layout=lightbox', '&layout=lightbox'],
['?', '&'],
$serverUrl
);
// Store the current URL as a login followup action
$this->followup()->store($extras, $serverUrl);
if (!empty($msg)) {
$this->flashMessenger()->addMessage($msg, 'error');
}
// Set a flag indicating that we are forcing login:
$this->getRequest()->getPost()->set('forcingLogin', true);
if ($forward) {
return $this->forwardTo('MyResearch', 'Login');
}
return $this->redirect()->toRoute('myresearch-home');
}
/**
* Does the user have catalog credentials available? Returns associative array
* of patron data if so, otherwise forwards to appropriate login prompt and
* returns false. If there is an ILS exception, a flash message is added and
* a newly created ViewModel is returned.
*
* @return bool|array|ViewModel
*/
protected function catalogLogin()
{
// First make sure user is logged in to VuFind:
$account = $this->getAuthManager();
if ($account->isLoggedIn() == false) {
return $this->forceLogin();
}
// Now check if the user has provided credentials with which to log in:
$ilsAuth = $this->getILSAuthenticator();
if (($username = $this->params()->fromPost('cat_username', false))
&& ($password = $this->params()->fromPost('cat_password', false))
) {
// Check for multiple ILS target selection
$target = $this->params()->fromPost('target', false);
if ($target) {
$username = "$target.$username";
}
try {
$patron = $ilsAuth->newCatalogLogin($username, $password);
// If login failed, store a warning message:
if (!$patron) {
$this->flashMessenger()->addErrorMessage('Invalid Patron Login');
}
} catch (ILSException $e) {
$this->flashMessenger()->addErrorMessage('ils_connection_failed');
}
} else {
try {
// If no credentials were provided, try the stored values:
$patron = $ilsAuth->storedCatalogLogin();
} catch (ILSException $e) {
$this->flashMessenger()->addErrorMessage('ils_connection_failed');
return $this->createViewModel();
}
}
// If catalog login failed, send the user to the right page:
if (!$patron) {
return $this->forwardTo('MyResearch', 'CatalogLogin');
}
// Send value (either false or patron array) back to caller:
return $patron;
}
/**
* Get a VuFind configuration.
*
* @param string $id Configuration identifier (default = main VuFind config)
*
* @return \Zend\Config\Config
*/
public function getConfig($id = 'config')
{
return $this->serviceLocator->get('VuFind\Config\PluginManager')->get($id);
}
/**
* Get the ILS connection.
*
* @return \VuFind\ILS\Connection
*/
public function getILS()
{
return $this->serviceLocator->get('VuFind\ILS\Connection');
}
/**
* Get the record loader
*
* @return \VuFind\Record\Loader
*/
public function getRecordLoader()
{
return $this->serviceLocator->get('VuFind\Record\Loader');
}
/**
* Get the record cache
*
* @return \VuFind\Record\Cache
*/
public function getRecordCache()
{
return $this->serviceLocator->get('VuFind\Record\Cache');
}
/**
* Get the record router.
*
* @return \VuFind\Record\Router
*/
public function getRecordRouter()
{
return $this->serviceLocator->get('VuFind\Record\Router');
}
/**
* Get a database table object.
*
* @param string $table Name of table to retrieve
*
* @return \VuFind\Db\Table\Gateway
*/
public function getTable($table)
{
return $this->serviceLocator->get('VuFind\Db\Table\PluginManager')
->get($table);
}
/**
* Get the full URL to one of VuFind's routes.
*
* @param bool|string $route Boolean true for current URL, otherwise name of
* route to render as URL
*
* @return string
*/
public function getServerUrl($route = true)
{
$serverHelper = $this->getViewRenderer()->plugin('serverurl');
return $serverHelper(
$route === true ? true : $this->url()->fromRoute($route)
);
}
/**
* Translate a string if a translator is available.
*
* @param string $msg Message to translate
* @param array $tokens Tokens to inject into the translated string
* @param string $default Default value to use if no translation is found (null
* for no default).
*
* @return string
*/
public function translate($msg, $tokens = [], $default = null)
{
return $this->getViewRenderer()->plugin('translate')
->__invoke($msg, $tokens, $default);
}
/**
* Convenience method to make invocation of forward() helper less verbose.
*
* @param string $controller Controller to invoke
* @param string $action Action to invoke
* @param array $params Extra parameters for the RouteMatch object (no
* need to provide action here, since $action takes care of that)
*
* @return mixed
*/
public function forwardTo($controller, $action, $params = [])
{
// Inject action into the RouteMatch parameters
$params['action'] = $action;
// Dispatch the requested controller/action:
return $this->forward()->dispatch($controller, $params);
}
/**
* Check to see if a form was submitted from its post value
* Also validate the Captcha, if it's activated
*
* @param string $submitElement Name of the post field of the submit button
* @param bool $useRecaptcha Are we using captcha in this situation?
*
* @return bool
*/
protected function formWasSubmitted($submitElement = 'submit',
$useRecaptcha = false
) {
// Fail if the expected submission element was missing from the POST:
// Form was submitted; if CAPTCHA is expected, validate it now.
return $this->params()->fromPost($submitElement, false)
&& (!$useRecaptcha || $this->recaptcha()->validate());
}
/**
* Confirm an action.
*
* @param string $title Title of confirm dialog
* @param string $yesTarget Form target for "confirm" action
* @param string $noTarget Form target for "cancel" action
* @param string|array $messages Info messages for confirm dialog
* @param array $extras Extra details to include in form
*
* @return mixed
*/
public function confirm($title, $yesTarget, $noTarget, $messages = [],
$extras = []
) {
return $this->forwardTo(
'Confirm', 'Confirm',
[
'data' => [
'title' => $title,
'confirm' => $yesTarget,
'cancel' => $noTarget,
'messages' => (array)$messages,
'extras' => $extras
]
]
);
}
/**
* Prevent session writes -- this is designed to be called prior to time-
* consuming AJAX operations to help reduce the odds of a timing-related bug
* that causes the wrong version of session data to be written to disk (see
* VUFIND-716 for more details).
*
* @return void
*/
protected function disableSessionWrites()
{
$this->serviceLocator->get('VuFind\Session\Settings')->disableWrite();
}
/**
* Get the search memory
*
* @return \VuFind\Search\Memory
*/
public function getSearchMemory()
{
return $this->serviceLocator->get('VuFind\Search\Memory');
}
/**
* Are comments enabled?
*
* @return bool
*/
protected function commentsEnabled()
{
$check = $this->serviceLocator->get('VuFind\Config\AccountCapabilities');
return $check->getCommentSetting() !== 'disabled';
}
/**
* Are lists enabled?
*
* @return bool
*/
protected function listsEnabled()
{
$check = $this->serviceLocator->get('VuFind\Config\AccountCapabilities');
return $check->getListSetting() !== 'disabled';
}
/**
* Are tags enabled?
*
* @return bool
*/
protected function tagsEnabled()
{
$check = $this->serviceLocator->get('VuFind\Config\AccountCapabilities');
return $check->getTagSetting() !== 'disabled';
}
/**
* Store a referer (if appropriate) to keep post-login redirect pointing
* to an appropriate location. This is used when the user clicks the
* log in link from an arbitrary page or when a password is mistyped;
* separate logic is used for storing followup information when VuFind
* forces the user to log in from another context.
*
* @return void
*/
protected function setFollowupUrlToReferer()
{
// lbreferer is the stored current url of the lightbox
// which overrides the url from the server request when present
$referer = $this->getRequest()->getQuery()->get(
'lbreferer',
$this->getRequest()->getServer()->get('HTTP_REFERER', null)
);
// Get the referer -- if it's empty, there's nothing to store!
if (empty($referer)) {
return;
}
$refererNorm = $this->normalizeUrlForComparison($referer);
// If the referer lives outside of VuFind, don't store it! We only
// want internal post-login redirects.
$baseUrl = $this->getServerUrl('home');
$baseUrlNorm = $this->normalizeUrlForComparison($baseUrl);
if (0 !== strpos($refererNorm, $baseUrlNorm)) {
return;
}
// If the referer is the MyResearch/Home action, it probably means
// that the user is repeatedly mistyping their password. We should
// ignore this and instead rely on any previously stored referer.
$myResearchHomeUrl = $this->getServerUrl('myresearch-home');
$mrhuNorm = $this->normalizeUrlForComparison($myResearchHomeUrl);
if ($mrhuNorm === $refererNorm) {
return;
}
// If we got this far, we want to store the referer:
$this->followup()->store([], $referer);
}
/**
* Normalize the referer URL so that inconsistencies in protocol and trailing
* slashes do not break comparisons.
*
* @param string $url URL to normalize
*
* @return string
*/
protected function normalizeUrlForComparison($url)
{
$parts = explode('://', $url, 2);
return trim(end($parts), '/');
}
/**
* Retrieve a referer to keep post-login redirect pointing
* to an appropriate location.
* Unset the followup before returning.
*
* @return string
*/
protected function getFollowupUrl()
{
return $this->followup()->retrieve('url', '');
}
/**
* Sometimes we need to unset the followup to trigger default behaviors
*
* @return void
*/
protected function clearFollowupUrl()
{
$this->followup()->clear('url');
}
/**
* Get the tab configuration for this controller.
*
* @return array
*/
protected function getRecordTabConfig()
{
$cfg = $this->serviceLocator->get('Config');
return $cfg['vufind']['recorddriver_tabs'];
}
}
| 1 | 26,505 | Is there really a need for this 'simple' layout? Is there a reason you can't use 'lightbox'? The only difference seems to be that the lightbox layout includes Piwik/Google Analytics tracking and simple does not. If tracking needs to be disabled for some reason, perhaps there is a way to do that without creating a whole new layout. | vufind-org-vufind | php |
@@ -526,6 +526,19 @@ public abstract class MergePolicy {
public abstract MergeSpecification findForcedDeletesMerges(
SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException;
+ /**
+ * Identifies merges that we want to execute (synchronously) on commit. By default, do not synchronously merge on commit.
+ *
+ * If a returned {@link OneMerge} includes a segment already included in a registered merge, then the commit will fail.
+ * Use {@link MergeContext#getMergingSegments()} to determine which segments are currently registered to merge.
+ *
+ * @param segmentInfos the total set of segments in the index (while preparing the commit)
+ * @param mergeContext the IndexWriter to find the merges on
+ */
+ public MergeSpecification findCommitMerges(SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException {
+ return null;
+ }
+
/**
* Returns true if a new segment (regardless of its origin) should use the
* compound file format. The default implementation returns <code>true</code> | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BooleanSupplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MergeInfo;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.IOSupplier;
import org.apache.lucene.util.InfoStream;
/**
* <p>Expert: a MergePolicy determines the sequence of
* primitive merge operations.</p>
*
* <p>Whenever the segments in an index have been altered by
* {@link IndexWriter}, either the addition of a newly
* flushed segment, addition of many segments from
* addIndexes* calls, or a previous merge that may now need
* to cascade, {@link IndexWriter} invokes {@link
* #findMerges} to give the MergePolicy a chance to pick
* merges that are now required. This method returns a
* {@link MergeSpecification} instance describing the set of
* merges that should be done, or null if no merges are
* necessary. When IndexWriter.forceMerge is called, it calls
* {@link #findForcedMerges(SegmentInfos, int, Map, MergeContext)} and the MergePolicy should
* then return the necessary merges.</p>
*
* <p>Note that the policy can return more than one merge at
* a time. In this case, if the writer is using {@link
* SerialMergeScheduler}, the merges will be run
* sequentially but if it is using {@link
* ConcurrentMergeScheduler} they will be run concurrently.</p>
*
* <p>The default MergePolicy is {@link
* TieredMergePolicy}.</p>
*
* @lucene.experimental
*/
public abstract class MergePolicy {
/**
* Progress and state for an executing merge. This class
* encapsulates the logic to pause and resume the merge thread
* or to abort the merge entirely.
*
* @lucene.experimental */
public static class OneMergeProgress {
/** Reason for pausing the merge thread. */
public static enum PauseReason {
/** Stopped (because of throughput rate set to 0, typically). */
STOPPED,
/** Temporarily paused because of exceeded throughput rate. */
PAUSED,
/** Other reason. */
OTHER
};
private final ReentrantLock pauseLock = new ReentrantLock();
private final Condition pausing = pauseLock.newCondition();
/**
* Pause times (in nanoseconds) for each {@link PauseReason}.
*/
private final EnumMap<PauseReason, AtomicLong> pauseTimesNS;
private volatile boolean aborted;
/**
* This field is for sanity-check purposes only. Only the same thread that invoked
* {@link OneMerge#mergeInit()} is permitted to be calling
* {@link #pauseNanos}. This is always verified at runtime.
*/
private Thread owner;
/** Creates a new merge progress info. */
public OneMergeProgress() {
// Place all the pause reasons in there immediately so that we can simply update values.
pauseTimesNS = new EnumMap<PauseReason,AtomicLong>(PauseReason.class);
for (PauseReason p : PauseReason.values()) {
pauseTimesNS.put(p, new AtomicLong());
}
}
/**
* Abort the merge this progress tracks at the next
* possible moment.
*/
public void abort() {
aborted = true;
wakeup(); // wakeup any paused merge thread.
}
/**
* Return the aborted state of this merge.
*/
public boolean isAborted() {
return aborted;
}
/**
* Pauses the calling thread for at least <code>pauseNanos</code> nanoseconds
* unless the merge is aborted or the external condition returns <code>false</code>,
* in which case control returns immediately.
*
* The external condition is required so that other threads can terminate the pausing immediately,
* before <code>pauseNanos</code> expires. We can't rely on just {@link Condition#awaitNanos(long)} alone
* because it can return due to spurious wakeups too.
*
* @param condition The pause condition that should return false if immediate return from this
* method is needed. Other threads can wake up any sleeping thread by calling
* {@link #wakeup}, but it'd fall to sleep for the remainder of the requested time if this
     * condition keeps returning true.
*/
public void pauseNanos(long pauseNanos, PauseReason reason, BooleanSupplier condition) throws InterruptedException {
if (Thread.currentThread() != owner) {
throw new RuntimeException("Only the merge owner thread can call pauseNanos(). This thread: "
+ Thread.currentThread().getName() + ", owner thread: "
+ owner);
}
long start = System.nanoTime();
AtomicLong timeUpdate = pauseTimesNS.get(reason);
pauseLock.lock();
try {
while (pauseNanos > 0 && !aborted && condition.getAsBoolean()) {
pauseNanos = pausing.awaitNanos(pauseNanos);
}
} finally {
pauseLock.unlock();
timeUpdate.addAndGet(System.nanoTime() - start);
}
}
/**
* Request a wakeup for any threads stalled in {@link #pauseNanos}.
*/
public void wakeup() {
pauseLock.lock();
try {
pausing.signalAll();
} finally {
pauseLock.unlock();
}
}
/** Returns pause reasons and associated times in nanoseconds. */
public Map<PauseReason,Long> getPauseTimes() {
Set<Entry<PauseReason,AtomicLong>> entries = pauseTimesNS.entrySet();
return entries.stream()
.collect(Collectors.toMap(
(e) -> e.getKey(),
(e) -> e.getValue().get()));
}
final void setMergeThread(Thread owner) {
assert this.owner == null;
this.owner = owner;
}
}
/** OneMerge provides the information necessary to perform
* an individual primitive merge operation, resulting in
* a single new segment. The merge spec includes the
* subset of segments to be merged as well as whether the
* new segment should use the compound file format.
*
* @lucene.experimental */
public static class OneMerge {
SegmentCommitInfo info; // used by IndexWriter
boolean registerDone; // used by IndexWriter
long mergeGen; // used by IndexWriter
boolean isExternal; // used by IndexWriter
int maxNumSegments = -1; // used by IndexWriter
/** Estimated size in bytes of the merged segment. */
public volatile long estimatedMergeBytes; // used by IndexWriter
// Sum of sizeInBytes of all SegmentInfos; set by IW.mergeInit
volatile long totalMergeBytes;
List<SegmentReader> readers; // used by IndexWriter
List<Bits> hardLiveDocs; // used by IndexWriter
/** Segments to be merged. */
public final List<SegmentCommitInfo> segments;
/**
* Control used to pause/stop/resume the merge thread.
*/
private final OneMergeProgress mergeProgress;
volatile long mergeStartNS = -1;
/** Total number of documents in segments to be merged, not accounting for deletions. */
public final int totalMaxDoc;
Throwable error;
/** Sole constructor.
* @param segments List of {@link SegmentCommitInfo}s
* to be merged. */
public OneMerge(List<SegmentCommitInfo> segments) {
if (0 == segments.size()) {
throw new RuntimeException("segments must include at least one segment");
}
// clone the list, as the in list may be based off original SegmentInfos and may be modified
this.segments = new ArrayList<>(segments);
int count = 0;
for(SegmentCommitInfo info : segments) {
count += info.info.maxDoc();
}
totalMaxDoc = count;
mergeProgress = new OneMergeProgress();
}
/**
* Called by {@link IndexWriter} after the merge started and from the
* thread that will be executing the merge.
*/
public void mergeInit() throws IOException {
mergeProgress.setMergeThread(Thread.currentThread());
}
/** Called by {@link IndexWriter} after the merge is done and all readers have been closed. */
public void mergeFinished() throws IOException {
}
/** Wrap the reader in order to add/remove information to the merged segment. */
public CodecReader wrapForMerge(CodecReader reader) throws IOException {
return reader;
}
/**
* Expert: Sets the {@link SegmentCommitInfo} of the merged segment.
* Allows sub-classes to e.g. set diagnostics properties.
*/
public void setMergeInfo(SegmentCommitInfo info) {
this.info = info;
}
/**
* Returns the {@link SegmentCommitInfo} for the merged segment,
* or null if it hasn't been set yet.
*/
public SegmentCommitInfo getMergeInfo() {
return info;
}
/** Record that an exception occurred while executing
* this merge */
synchronized void setException(Throwable error) {
this.error = error;
}
/** Retrieve previous exception set by {@link
* #setException}. */
synchronized Throwable getException() {
return error;
}
/** Returns a readable description of the current merge
* state. */
public String segString() {
StringBuilder b = new StringBuilder();
final int numSegments = segments.size();
for(int i=0;i<numSegments;i++) {
if (i > 0) {
b.append(' ');
}
b.append(segments.get(i).toString());
}
if (info != null) {
b.append(" into ").append(info.info.name);
}
if (maxNumSegments != -1) {
b.append(" [maxNumSegments=").append(maxNumSegments).append(']');
}
if (isAborted()) {
b.append(" [ABORTED]");
}
return b.toString();
}
/**
* Returns the total size in bytes of this merge. Note that this does not
* indicate the size of the merged segment, but the
* input total size. This is only set once the merge is
* initialized by IndexWriter.
*/
public long totalBytesSize() {
return totalMergeBytes;
}
/**
* Returns the total number of documents that are included with this merge.
* Note that this does not indicate the number of documents after the merge.
* */
public int totalNumDocs() {
int total = 0;
for (SegmentCommitInfo info : segments) {
total += info.info.maxDoc();
}
return total;
}
/** Return {@link MergeInfo} describing this merge. */
public MergeInfo getStoreMergeInfo() {
return new MergeInfo(totalMaxDoc, estimatedMergeBytes, isExternal, maxNumSegments);
}
/** Returns true if this merge was or should be aborted. */
public boolean isAborted() {
return mergeProgress.isAborted();
}
/** Marks this merge as aborted. The merge thread should terminate at the soonest possible moment. */
public void setAborted() {
this.mergeProgress.abort();
}
/** Checks if merge has been aborted and throws a merge exception if so. */
public void checkAborted() throws MergeAbortedException {
if (isAborted()) {
throw new MergePolicy.MergeAbortedException("merge is aborted: " + segString());
}
}
/**
* Returns a {@link OneMergeProgress} instance for this merge, which provides
* statistics of the merge threads (run time vs. sleep time) if merging is throttled.
*/
public OneMergeProgress getMergeProgress() {
return mergeProgress;
}
}
/**
* A MergeSpecification instance provides the information
* necessary to perform multiple merges. It simply
* contains a list of {@link OneMerge} instances.
*/
public static class MergeSpecification {
/**
* The subset of segments to be included in the primitive merge.
*/
public final List<OneMerge> merges = new ArrayList<>();
/** Sole constructor. Use {@link
* #add(MergePolicy.OneMerge)} to add merges. */
public MergeSpecification() {
}
/** Adds the provided {@link OneMerge} to this
* specification. */
public void add(OneMerge merge) {
merges.add(merge);
}
/** Returns a description of the merges in this specification. */
public String segString(Directory dir) {
StringBuilder b = new StringBuilder();
b.append("MergeSpec:\n");
final int count = merges.size();
for(int i=0;i<count;i++) {
b.append(" ").append(1 + i).append(": ").append(merges.get(i).segString());
}
return b.toString();
}
}
/** Exception thrown if there are any problems while executing a merge. */
public static class MergeException extends RuntimeException {
private Directory dir;
/** Create a {@code MergeException}. */
public MergeException(String message, Directory dir) {
super(message);
this.dir = dir;
}
/** Create a {@code MergeException}. */
public MergeException(Throwable exc, Directory dir) {
super(exc);
this.dir = dir;
}
/** Returns the {@link Directory} of the index that hit
* the exception. */
public Directory getDirectory() {
return dir;
}
}
/** Thrown when a merge was explicitly aborted because
* {@link IndexWriter#abortMerges} was called. Normally
* this exception is privately caught and suppressed by
* {@link IndexWriter}. */
public static class MergeAbortedException extends IOException {
/** Create a {@link MergeAbortedException}. */
public MergeAbortedException() {
super("merge is aborted");
}
/** Create a {@link MergeAbortedException} with a
* specified message. */
public MergeAbortedException(String message) {
super(message);
}
}
/**
* Default ratio for compound file system usage. Set to <tt>1.0</tt>, always use
* compound file system.
*/
protected static final double DEFAULT_NO_CFS_RATIO = 1.0;
/**
* Default max segment size in order to use compound file system. Set to {@link Long#MAX_VALUE}.
*/
protected static final long DEFAULT_MAX_CFS_SEGMENT_SIZE = Long.MAX_VALUE;
  /** If the size of the merged segment exceeds this ratio of
* the total index size then it will remain in
* non-compound format */
protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
/** If the size of the merged segment exceeds
* this value then it will not use compound file format. */
protected long maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
/**
* Creates a new merge policy instance.
*/
public MergePolicy() {
this(DEFAULT_NO_CFS_RATIO, DEFAULT_MAX_CFS_SEGMENT_SIZE);
}
/**
* Creates a new merge policy instance with default settings for noCFSRatio
* and maxCFSSegmentSize. This ctor should be used by subclasses using different
* defaults than the {@link MergePolicy}
*/
protected MergePolicy(double defaultNoCFSRatio, long defaultMaxCFSSegmentSize) {
this.noCFSRatio = defaultNoCFSRatio;
this.maxCFSSegmentSize = defaultMaxCFSSegmentSize;
}
/**
* Determine what set of merge operations are now necessary on the index.
* {@link IndexWriter} calls this whenever there is a change to the segments.
* This call is always synchronized on the {@link IndexWriter} instance so
* only one thread at a time will call this method.
* @param mergeTrigger the event that triggered the merge
* @param segmentInfos
* the total set of segments in the index
* @param mergeContext the IndexWriter to find the merges on
*/
public abstract MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext)
throws IOException;
/**
* Determine what set of merge operations is necessary in
* order to merge to {@code <=} the specified segment count. {@link IndexWriter} calls this when its
* {@link IndexWriter#forceMerge} method is called. This call is always
* synchronized on the {@link IndexWriter} instance so only one thread at a
* time will call this method.
* @param segmentInfos
* the total set of segments in the index
* @param maxSegmentCount
* requested maximum number of segments in the index (currently this
* is always 1)
* @param segmentsToMerge
* contains the specific SegmentInfo instances that must be merged
* away. This may be a subset of all
* SegmentInfos. If the value is True for a
* given SegmentInfo, that means this segment was
* an original segment present in the
* to-be-merged index; else, it was a segment
* produced by a cascaded merge.
* @param mergeContext the IndexWriter to find the merges on
*/
public abstract MergeSpecification findForcedMerges(
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, MergeContext mergeContext)
throws IOException;
/**
* Determine what set of merge operations is necessary in order to expunge all
* deletes from the index.
* @param segmentInfos
* the total set of segments in the index
* @param mergeContext the IndexWriter to find the merges on
*/
public abstract MergeSpecification findForcedDeletesMerges(
SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException;
/**
* Returns true if a new segment (regardless of its origin) should use the
* compound file format. The default implementation returns <code>true</code>
* iff the size of the given mergedInfo is less or equal to
* {@link #getMaxCFSSegmentSizeMB()} and the size is less or equal to the
* TotalIndexSize * {@link #getNoCFSRatio()} otherwise <code>false</code>.
*/
public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, MergeContext mergeContext) throws IOException {
if (getNoCFSRatio() == 0.0) {
return false;
}
long mergedInfoSize = size(mergedInfo, mergeContext);
if (mergedInfoSize > maxCFSSegmentSize) {
return false;
}
if (getNoCFSRatio() >= 1.0) {
return true;
}
long totalSize = 0;
for (SegmentCommitInfo info : infos) {
totalSize += size(info, mergeContext);
}
return mergedInfoSize <= getNoCFSRatio() * totalSize;
}
/** Return the byte size of the provided {@link
* SegmentCommitInfo}, pro-rated by percentage of
   * non-deleted documents. */
protected long size(SegmentCommitInfo info, MergeContext mergeContext) throws IOException {
long byteSize = info.sizeInBytes();
int delCount = mergeContext.numDeletesToMerge(info);
assert assertDelCount(delCount, info);
double delRatio = info.info.maxDoc() <= 0 ? 0.0f : (float) delCount / (float) info.info.maxDoc();
assert delRatio <= 1.0;
return (info.info.maxDoc() <= 0 ? byteSize : (long) (byteSize * (1.0 - delRatio)));
}
/**
* Asserts that the delCount for this SegmentCommitInfo is valid
*/
protected final boolean assertDelCount(int delCount, SegmentCommitInfo info) {
assert delCount >= 0: "delCount must be positive: " + delCount;
assert delCount <= info.info.maxDoc() : "delCount: " + delCount
+ " must be leq than maxDoc: " + info.info.maxDoc();
return true;
}
/** Returns true if this single info is already fully merged (has no
* pending deletes, is in the same dir as the
   * writer, and matches the current compound file setting). */
protected final boolean isMerged(SegmentInfos infos, SegmentCommitInfo info, MergeContext mergeContext) throws IOException {
assert mergeContext != null;
int delCount = mergeContext.numDeletesToMerge(info);
assert assertDelCount(delCount, info);
return delCount == 0 &&
useCompoundFile(infos, info, mergeContext) == info.info.getUseCompoundFile();
}
/** Returns current {@code noCFSRatio}.
*
* @see #setNoCFSRatio */
public double getNoCFSRatio() {
return noCFSRatio;
}
/** If a merged segment will be more than this percentage
* of the total size of the index, leave the segment as
* non-compound file even if compound file is enabled.
* Set to 1.0 to always use CFS regardless of merge
* size. */
public void setNoCFSRatio(double noCFSRatio) {
if (noCFSRatio < 0.0 || noCFSRatio > 1.0) {
throw new IllegalArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + noCFSRatio);
}
this.noCFSRatio = noCFSRatio;
}
/** Returns the largest size allowed for a compound file segment */
public double getMaxCFSSegmentSizeMB() {
return maxCFSSegmentSize/1024/1024.;
}
/** If a merged segment will be more than this value,
* leave the segment as
* non-compound file even if compound file is enabled.
* Set this to Double.POSITIVE_INFINITY (default) and noCFSRatio to 1.0
* to always use CFS regardless of merge size. */
public void setMaxCFSSegmentSizeMB(double v) {
if (v < 0.0) {
throw new IllegalArgumentException("maxCFSSegmentSizeMB must be >=0 (got " + v + ")");
}
v *= 1024 * 1024;
this.maxCFSSegmentSize = v > Long.MAX_VALUE ? Long.MAX_VALUE : (long) v;
}
/**
   * Returns true if the segment represented by the given CodecReader should be kept even if it's fully deleted.
   * This is useful for testing, for instance if the merge policy implements retention policies for soft deletes.
*/
public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
return false;
}
/**
* Returns the number of deletes that a merge would claim on the given segment. This method will by default return
* the sum of the del count on disk and the pending delete count. Yet, subclasses that wrap merge readers
* might modify this to reflect deletes that are carried over to the target segment in the case of soft deletes.
*
   * Soft deletes allow all deletes to survive across merges in order to control when the soft-deleted data is claimed.
* @see IndexWriter#softUpdateDocument(Term, Iterable, Field...)
* @see IndexWriterConfig#setSoftDeletesField(String)
* @param info the segment info that identifies the segment
   * @param delCount the number of deleted documents for this segment
* @param readerSupplier a supplier that allows to obtain a {@link CodecReader} for this segment
*/
public int numDeletesToMerge(SegmentCommitInfo info, int delCount,
IOSupplier<CodecReader> readerSupplier) throws IOException {
return delCount;
}
/**
* Builds a String representation of the given SegmentCommitInfo instances
*/
protected final String segString(MergeContext mergeContext, Iterable<SegmentCommitInfo> infos) {
return StreamSupport.stream(infos.spliterator(), false)
.map(info -> info.toString(mergeContext.numDeletedDocs(info) - info.getDelCount()))
.collect(Collectors.joining(" "));
}
/** Print a debug message to {@link MergeContext}'s {@code
* infoStream}. */
protected final void message(String message, MergeContext mergeContext) {
if (verbose(mergeContext)) {
mergeContext.getInfoStream().message("MP", message);
}
}
/**
* Returns <code>true</code> if the info-stream is in verbose mode
* @see #message(String, MergeContext)
*/
protected final boolean verbose(MergeContext mergeContext) {
return mergeContext.getInfoStream().isEnabled("MP");
}
/**
* This interface represents the current context of the merge selection process.
   * It allows access to real-time information like the currently merging segments or
* how many deletes a segment would claim back if merged. This context might be stateful
* and change during the execution of a merge policy's selection processes.
* @lucene.experimental
*/
public interface MergeContext {
/**
* Returns the number of deletes a merge would claim back if the given segment is merged.
* @see MergePolicy#numDeletesToMerge(SegmentCommitInfo, int, org.apache.lucene.util.IOSupplier)
* @param info the segment to get the number of deletes for
*/
int numDeletesToMerge(SegmentCommitInfo info) throws IOException;
/**
* Returns the number of deleted documents in the given segments.
*/
int numDeletedDocs(SegmentCommitInfo info);
/**
* Returns the info stream that can be used to log messages
*/
InfoStream getInfoStream();
/**
* Returns an unmodifiable set of segments that are currently merging.
*/
Set<SegmentCommitInfo> getMergingSegments();
}
}
| 1 | 32,008 | Can you say what exception will be thrown in that case (or add an `@throws`, below)? | apache-lucene-solr | java |
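The Javadoc added in this row's patch defines a simple contract for findCommitMerges: return a MergeSpecification of merges to run synchronously while preparing the commit, return null to skip merging on commit, and avoid any segment already registered for a concurrent merge (see MergeContext#getMergingSegments), since including one makes the commit fail. A minimal, hypothetical sketch of a policy honoring that contract follows; the class name MergeOnCommitPolicy and the "merge every segment not already merging" strategy are illustrative assumptions, not part of the patch or of Lucene itself.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import org.apache.lucene.index.FilterMergePolicy;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;

/** Hypothetical example: merge all committing segments that are not already merging. */
public class MergeOnCommitPolicy extends FilterMergePolicy {

  public MergeOnCommitPolicy(MergePolicy in) {
    super(in); // delegate findMerges/findForcedMerges/findForcedDeletesMerges to the wrapped policy
  }

  @Override
  public MergeSpecification findCommitMerges(SegmentInfos segmentInfos, MergeContext mergeContext)
      throws IOException {
    // Skip segments already registered for a merge; per the Javadoc, including one would fail the commit.
    Set<SegmentCommitInfo> merging = mergeContext.getMergingSegments();
    List<SegmentCommitInfo> candidates = new ArrayList<>();
    for (SegmentCommitInfo sci : segmentInfos) {
      if (merging.contains(sci) == false) {
        candidates.add(sci);
      }
    }
    if (candidates.size() < 2) {
      return null; // nothing useful to merge on this commit
    }
    MergeSpecification spec = new MergeSpecification();
    spec.add(new OneMerge(candidates)); // one synchronous merge of all eligible segments
    return spec;
  }
}

Such a policy would typically be installed by wrapping an existing policy (for example a TieredMergePolicy) via IndexWriterConfig#setMergePolicy; whether and how long IndexWriter waits for these merges during commit depends on the writer configuration that accompanies this API in the relevant Lucene version.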