Dataset schema (one row per source file):
  repo_id     string (length 0 to 42)
  file_path   string (length 15 to 97)
  content     string (length 2 to 2.41M)
bitcoin/src/leveldb
bitcoin/src/leveldb/table/filter_block.cc
// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/filter_block.h"

#include "leveldb/filter_policy.h"
#include "util/coding.h"

namespace leveldb {

// See doc/table_format.md for an explanation of the filter block format.

// Generate new filter every 2KB of data
static const size_t kFilterBaseLg = 11;
static const size_t kFilterBase = 1 << kFilterBaseLg;

FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
    : policy_(policy) {}

void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
  uint64_t filter_index = (block_offset / kFilterBase);
  assert(filter_index >= filter_offsets_.size());
  while (filter_index > filter_offsets_.size()) {
    GenerateFilter();
  }
}

void FilterBlockBuilder::AddKey(const Slice& key) {
  Slice k = key;
  start_.push_back(keys_.size());
  keys_.append(k.data(), k.size());
}

Slice FilterBlockBuilder::Finish() {
  if (!start_.empty()) {
    GenerateFilter();
  }

  // Append array of per-filter offsets
  const uint32_t array_offset = result_.size();
  for (size_t i = 0; i < filter_offsets_.size(); i++) {
    PutFixed32(&result_, filter_offsets_[i]);
  }

  PutFixed32(&result_, array_offset);
  result_.push_back(kFilterBaseLg);  // Save encoding parameter in result
  return Slice(result_);
}

void FilterBlockBuilder::GenerateFilter() {
  const size_t num_keys = start_.size();
  if (num_keys == 0) {
    // Fast path if there are no keys for this filter
    filter_offsets_.push_back(result_.size());
    return;
  }

  // Make list of keys from flattened key structure
  start_.push_back(keys_.size());  // Simplify length computation
  tmp_keys_.resize(num_keys);
  for (size_t i = 0; i < num_keys; i++) {
    const char* base = keys_.data() + start_[i];
    size_t length = start_[i + 1] - start_[i];
    tmp_keys_[i] = Slice(base, length);
  }

  // Generate filter for current set of keys and append to result_.
  filter_offsets_.push_back(result_.size());
  policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_);

  tmp_keys_.clear();
  keys_.clear();
  start_.clear();
}

FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
                                     const Slice& contents)
    : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
  size_t n = contents.size();
  if (n < 5) return;  // 1 byte for base_lg_ and 4 for start of offset array
  base_lg_ = contents[n - 1];
  uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
  if (last_word > n - 5) return;
  data_ = contents.data();
  offset_ = data_ + last_word;
  num_ = (n - 5 - last_word) / 4;
}

bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
  uint64_t index = block_offset >> base_lg_;
  if (index < num_) {
    uint32_t start = DecodeFixed32(offset_ + index * 4);
    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
    if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
      Slice filter = Slice(data_ + start, limit - start);
      return policy_->KeyMayMatch(key, filter);
    } else if (start == limit) {
      // Empty filters do not match any keys
      return false;
    }
  }
  return true;  // Errors are treated as potential matches
}

}  // namespace leveldb
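For orientation, a minimal sketch of how the builder and reader above fit together, modeled on the pattern of the in-tree filter block test; it assumes you are compiling inside the leveldb source tree and uses NewBloomFilterPolicy() from leveldb/filter_policy.h:

#include <cassert>
#include <cstdio>

#include "leveldb/filter_policy.h"
#include "table/filter_block.h"

using namespace leveldb;

int main() {
  const FilterPolicy* policy = NewBloomFilterPolicy(10);  // ~10 bits per key

  FilterBlockBuilder builder(policy);
  builder.StartBlock(0);     // data block at file offset 0 -> filter index 0
  builder.AddKey("foo");
  builder.AddKey("bar");
  builder.StartBlock(3100);  // 3100 >> kFilterBaseLg == 1 -> second filter
  builder.AddKey("baz");
  Slice block = builder.Finish();

  FilterBlockReader reader(policy, block);
  // Keys added for the block at offset 0 should match its filter.
  assert(reader.KeyMayMatch(0, "foo"));
  assert(reader.KeyMayMatch(0, "bar"));
  // "baz" was added under the block starting near offset 3100.
  assert(reader.KeyMayMatch(3100, "baz"));
  // A miss is likely but not guaranteed: bloom filters allow false positives.
  printf("missing may match: %d\n", reader.KeyMayMatch(0, "missing"));

  delete policy;
  return 0;
}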
bitcoin/src/leveldb
bitcoin/src/leveldb/table/block.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_TABLE_BLOCK_H_
#define STORAGE_LEVELDB_TABLE_BLOCK_H_

#include <stddef.h>
#include <stdint.h>

#include "leveldb/iterator.h"

namespace leveldb {

struct BlockContents;
class Comparator;

class Block {
 public:
  // Initialize the block with the specified contents.
  explicit Block(const BlockContents& contents);

  Block(const Block&) = delete;
  Block& operator=(const Block&) = delete;

  ~Block();

  size_t size() const { return size_; }
  Iterator* NewIterator(const Comparator* comparator);

 private:
  class Iter;

  uint32_t NumRestarts() const;

  const char* data_;
  size_t size_;
  uint32_t restart_offset_;  // Offset in data_ of restart array
  bool owned_;               // Block owns data_[]
};

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_TABLE_BLOCK_H_
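A short sketch of the Block lifecycle this header implies: serialize sorted entries with BlockBuilder (declared in table/block_builder.h), wrap the bytes in a BlockContents (from table/format.h), and iterate. Assumes compilation inside the leveldb tree:

#include <cstdio>
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/options.h"
#include "table/block.h"
#include "table/block_builder.h"
#include "table/format.h"

using namespace leveldb;

int main() {
  Options options;  // default block_restart_interval is 16
  BlockBuilder builder(&options);
  builder.Add("k1", "v1");  // keys must arrive in sorted order
  builder.Add("k2", "v2");
  std::string raw = builder.Finish().ToString();

  BlockContents contents;
  contents.data = Slice(raw);  // must stay live while the Block is in use
  contents.cachable = false;
  contents.heap_allocated = false;

  Block block(contents);
  Iterator* it = block.NewIterator(BytewiseComparator());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    printf("%s -> %s\n", it->key().ToString().c_str(),
           it->value().ToString().c_str());
  }
  delete it;
  return 0;
}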
bitcoin/src/leveldb
bitcoin/src/leveldb/table/format.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/format.h"

#include "leveldb/env.h"
#include "port/port.h"
#include "table/block.h"
#include "util/coding.h"
#include "util/crc32c.h"

namespace leveldb {

void BlockHandle::EncodeTo(std::string* dst) const {
  // Sanity check that all fields have been set
  assert(offset_ != ~static_cast<uint64_t>(0));
  assert(size_ != ~static_cast<uint64_t>(0));
  PutVarint64(dst, offset_);
  PutVarint64(dst, size_);
}

Status BlockHandle::DecodeFrom(Slice* input) {
  if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
    return Status::OK();
  } else {
    return Status::Corruption("bad block handle");
  }
}

void Footer::EncodeTo(std::string* dst) const {
  const size_t original_size = dst->size();
  metaindex_handle_.EncodeTo(dst);
  index_handle_.EncodeTo(dst);
  dst->resize(2 * BlockHandle::kMaxEncodedLength);  // Padding
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber & 0xffffffffu));
  PutFixed32(dst, static_cast<uint32_t>(kTableMagicNumber >> 32));
  assert(dst->size() == original_size + kEncodedLength);
  (void)original_size;  // Disable unused variable warning.
}

Status Footer::DecodeFrom(Slice* input) {
  const char* magic_ptr = input->data() + kEncodedLength - 8;
  const uint32_t magic_lo = DecodeFixed32(magic_ptr);
  const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4);
  const uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
                          (static_cast<uint64_t>(magic_lo)));
  if (magic != kTableMagicNumber) {
    return Status::Corruption("not an sstable (bad magic number)");
  }

  Status result = metaindex_handle_.DecodeFrom(input);
  if (result.ok()) {
    result = index_handle_.DecodeFrom(input);
  }
  if (result.ok()) {
    // We skip over any leftover data (just padding for now) in "input"
    const char* end = magic_ptr + 8;
    *input = Slice(end, input->data() + input->size() - end);
  }
  return result;
}

Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
                 const BlockHandle& handle, BlockContents* result) {
  result->data = Slice();
  result->cachable = false;
  result->heap_allocated = false;

  // Read the block contents as well as the type/crc footer.
  // See table_builder.cc for the code that built this structure.
  size_t n = static_cast<size_t>(handle.size());
  char* buf = new char[n + kBlockTrailerSize];
  Slice contents;
  Status s = file->Read(handle.offset(), n + kBlockTrailerSize, &contents, buf);
  if (!s.ok()) {
    delete[] buf;
    return s;
  }
  if (contents.size() != n + kBlockTrailerSize) {
    delete[] buf;
    return Status::Corruption("truncated block read", file->GetName());
  }

  // Check the crc of the type and the block contents
  const char* data = contents.data();  // Pointer to where Read put the data
  if (options.verify_checksums) {
    const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
    const uint32_t actual = crc32c::Value(data, n + 1);
    if (actual != crc) {
      delete[] buf;
      s = Status::Corruption("block checksum mismatch", file->GetName());
      return s;
    }
  }

  switch (data[n]) {
    case kNoCompression:
      if (data != buf) {
        // File implementation gave us pointer to some other data.
        // Use it directly under the assumption that it will be live
        // while the file is open.
        delete[] buf;
        result->data = Slice(data, n);
        result->heap_allocated = false;
        result->cachable = false;  // Do not double-cache
      } else {
        result->data = Slice(buf, n);
        result->heap_allocated = true;
        result->cachable = true;
      }

      // Ok
      break;
    case kSnappyCompression: {
      size_t ulength = 0;
      if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) {
        delete[] buf;
        return Status::Corruption("corrupted compressed block contents",
                                  file->GetName());
      }
      char* ubuf = new char[ulength];
      if (!port::Snappy_Uncompress(data, n, ubuf)) {
        delete[] buf;
        delete[] ubuf;
        return Status::Corruption("corrupted compressed block contents",
                                  file->GetName());
      }
      delete[] buf;
      result->data = Slice(ubuf, ulength);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    }
    default:
      delete[] buf;
      return Status::Corruption("bad block type", file->GetName());
  }

  return Status::OK();
}

}  // namespace leveldb
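A small round-trip sketch for the BlockHandle encoding above (two varint64s, at most kMaxEncodedLength bytes); assumes the leveldb tree is on the include path:

#include <cassert>
#include <cstdio>
#include <string>

#include "table/format.h"

using namespace leveldb;

int main() {
  BlockHandle handle;
  handle.set_offset(8192);  // both fields must be set before EncodeTo()
  handle.set_size(4096);

  std::string encoded;
  handle.EncodeTo(&encoded);

  BlockHandle decoded;
  Slice input(encoded);
  Status s = decoded.DecodeFrom(&input);
  assert(s.ok());
  assert(decoded.offset() == 8192 && decoded.size() == 4096);
  printf("handle encoded in %zu bytes\n", encoded.size());
  return 0;
}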
bitcoin/src/leveldb
bitcoin/src/leveldb/table/filter_block.h
// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// A filter block is stored near the end of a Table file.  It contains
// filters (e.g., bloom filters) for all data blocks in the table combined
// into a single filter block.

#ifndef STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
#define STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_

#include <stddef.h>
#include <stdint.h>

#include <string>
#include <vector>

#include "leveldb/slice.h"
#include "util/hash.h"

namespace leveldb {

class FilterPolicy;

// A FilterBlockBuilder is used to construct all of the filters for a
// particular Table.  It generates a single string which is stored as
// a special block in the Table.
//
// The sequence of calls to FilterBlockBuilder must match the regexp:
//      (StartBlock AddKey*)* Finish
class FilterBlockBuilder {
 public:
  explicit FilterBlockBuilder(const FilterPolicy*);

  FilterBlockBuilder(const FilterBlockBuilder&) = delete;
  FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;

  void StartBlock(uint64_t block_offset);
  void AddKey(const Slice& key);
  Slice Finish();

 private:
  void GenerateFilter();

  const FilterPolicy* policy_;
  std::string keys_;             // Flattened key contents
  std::vector<size_t> start_;    // Starting index in keys_ of each key
  std::string result_;           // Filter data computed so far
  std::vector<Slice> tmp_keys_;  // policy_->CreateFilter() argument
  std::vector<uint32_t> filter_offsets_;
};

class FilterBlockReader {
 public:
  // REQUIRES: "contents" and *policy must stay live while *this is live.
  FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
  bool KeyMayMatch(uint64_t block_offset, const Slice& key);

 private:
  const FilterPolicy* policy_;
  const char* data_;    // Pointer to filter data (at block-start)
  const char* offset_;  // Pointer to beginning of offset array (at block-end)
  size_t num_;          // Number of entries in offset array
  size_t base_lg_;      // Encoding parameter (see kFilterBaseLg in .cc file)
};

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
bitcoin/src/leveldb
bitcoin/src/leveldb/table/two_level_iterator.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_TABLE_TWO_LEVEL_ITERATOR_H_
#define STORAGE_LEVELDB_TABLE_TWO_LEVEL_ITERATOR_H_

#include "leveldb/iterator.h"

namespace leveldb {

struct ReadOptions;

// Return a new two level iterator.  A two-level iterator contains an
// index iterator whose values point to a sequence of blocks where
// each block is itself a sequence of key,value pairs.  The returned
// two-level iterator yields the concatenation of all key/value pairs
// in the sequence of blocks.  Takes ownership of "index_iter" and
// will delete it when no longer needed.
//
// Uses a supplied function to convert an index_iter value into
// an iterator over the contents of the corresponding block.
Iterator* NewTwoLevelIterator(
    Iterator* index_iter,
    Iterator* (*block_function)(void* arg, const ReadOptions& options,
                                const Slice& index_value),
    void* arg, const ReadOptions& options);

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_TABLE_TWO_LEVEL_ITERATOR_H_
bitcoin/src/leveldb
bitcoin/src/leveldb/table/format.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
#define STORAGE_LEVELDB_TABLE_FORMAT_H_

#include <stdint.h>

#include <string>

#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "leveldb/table_builder.h"

namespace leveldb {

class Block;
class RandomAccessFile;
struct ReadOptions;

// BlockHandle is a pointer to the extent of a file that stores a data
// block or a meta block.
class BlockHandle {
 public:
  // Maximum encoding length of a BlockHandle
  enum { kMaxEncodedLength = 10 + 10 };

  BlockHandle();

  // The offset of the block in the file.
  uint64_t offset() const { return offset_; }
  void set_offset(uint64_t offset) { offset_ = offset; }

  // The size of the stored block
  uint64_t size() const { return size_; }
  void set_size(uint64_t size) { size_ = size; }

  void EncodeTo(std::string* dst) const;
  Status DecodeFrom(Slice* input);

 private:
  uint64_t offset_;
  uint64_t size_;
};

// Footer encapsulates the fixed information stored at the tail
// end of every table file.
class Footer {
 public:
  // Encoded length of a Footer.  Note that the serialization of a
  // Footer will always occupy exactly this many bytes.  It consists
  // of two block handles and a magic number.
  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };

  Footer() = default;

  // The block handle for the metaindex block of the table
  const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
  void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }

  // The block handle for the index block of the table
  const BlockHandle& index_handle() const { return index_handle_; }
  void set_index_handle(const BlockHandle& h) { index_handle_ = h; }

  void EncodeTo(std::string* dst) const;
  Status DecodeFrom(Slice* input);

 private:
  BlockHandle metaindex_handle_;
  BlockHandle index_handle_;
};

// kTableMagicNumber was picked by running
//    echo http://code.google.com/p/leveldb/ | sha1sum
// and taking the leading 64 bits.
static const uint64_t kTableMagicNumber = 0xdb4775248b80fb57ull;

// 1-byte type + 32-bit crc
static const size_t kBlockTrailerSize = 5;

struct BlockContents {
  Slice data;           // Actual contents of data
  bool cachable;        // True iff data can be cached
  bool heap_allocated;  // True iff caller should delete[] data.data()
};

// Read the block identified by "handle" from "file".  On failure
// return non-OK.  On success fill *result and return OK.
Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
                 const BlockHandle& handle, BlockContents* result);

// Implementation details follow.  Clients should ignore,

inline BlockHandle::BlockHandle()
    : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_TABLE_FORMAT_H_
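A companion sketch for the Footer encoding declared above: the serialization always occupies exactly kEncodedLength bytes (two padded handles plus the 8-byte magic number). Assumes the leveldb tree is on the include path:

#include <cassert>
#include <string>

#include "table/format.h"

using namespace leveldb;

int main() {
  BlockHandle meta, index;
  meta.set_offset(0);
  meta.set_size(16);
  index.set_offset(16);
  index.set_size(32);

  Footer footer;
  footer.set_metaindex_handle(meta);
  footer.set_index_handle(index);

  std::string encoded;
  footer.EncodeTo(&encoded);
  assert(encoded.size() == Footer::kEncodedLength);  // always 48 bytes

  Slice input(encoded);
  Footer decoded;
  assert(decoded.DecodeFrom(&input).ok());  // verifies the magic number
  assert(decoded.index_handle().offset() == 16);
  return 0;
}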
bitcoin/src/leveldb
bitcoin/src/leveldb/table/table_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "leveldb/table.h"

#include <map>
#include <string>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "leveldb/table_builder.h"
#include "table/block.h"
#include "table/block_builder.h"
#include "table/format.h"
#include "util/random.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace leveldb {

// Return reverse of "key".
// Used to test non-lexicographic comparators.
static std::string Reverse(const Slice& key) {
  std::string str(key.ToString());
  std::string rev("");
  for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
       ++rit) {
    rev.push_back(*rit);
  }
  return rev;
}

namespace {
class ReverseKeyComparator : public Comparator {
 public:
  const char* Name() const override {
    return "leveldb.ReverseBytewiseComparator";
  }

  int Compare(const Slice& a, const Slice& b) const override {
    return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
  }

  void FindShortestSeparator(std::string* start,
                             const Slice& limit) const override {
    std::string s = Reverse(*start);
    std::string l = Reverse(limit);
    BytewiseComparator()->FindShortestSeparator(&s, l);
    *start = Reverse(s);
  }

  void FindShortSuccessor(std::string* key) const override {
    std::string s = Reverse(*key);
    BytewiseComparator()->FindShortSuccessor(&s);
    *key = Reverse(s);
  }
};
}  // namespace

static ReverseKeyComparator reverse_key_comparator;

static void Increment(const Comparator* cmp, std::string* key) {
  if (cmp == BytewiseComparator()) {
    key->push_back('\0');
  } else {
    assert(cmp == &reverse_key_comparator);
    std::string rev = Reverse(*key);
    rev.push_back('\0');
    *key = Reverse(rev);
  }
}

// An STL comparator that uses a Comparator
namespace {
struct STLLessThan {
  const Comparator* cmp;

  STLLessThan() : cmp(BytewiseComparator()) {}
  STLLessThan(const Comparator* c) : cmp(c) {}
  bool operator()(const std::string& a, const std::string& b) const {
    return cmp->Compare(Slice(a), Slice(b)) < 0;
  }
};
}  // namespace

class StringSink : public WritableFile {
 public:
  ~StringSink() override = default;

  const std::string& contents() const { return contents_; }

  Status Close() override { return Status::OK(); }
  Status Flush() override { return Status::OK(); }
  Status Sync() override { return Status::OK(); }

  Status Append(const Slice& data) override {
    contents_.append(data.data(), data.size());
    return Status::OK();
  }

  std::string GetName() const override { return ""; }

 private:
  std::string contents_;
};

class StringSource : public RandomAccessFile {
 public:
  StringSource(const Slice& contents)
      : contents_(contents.data(), contents.size()) {}

  ~StringSource() override = default;

  uint64_t Size() const { return contents_.size(); }

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    if (offset >= contents_.size()) {
      return Status::InvalidArgument("invalid Read offset");
    }
    if (offset + n > contents_.size()) {
      n = contents_.size() - offset;
    }
    memcpy(scratch, &contents_[offset], n);
    *result = Slice(scratch, n);
    return Status::OK();
  }

  std::string GetName() const { return ""; }

 private:
  std::string contents_;
};

typedef std::map<std::string, std::string, STLLessThan> KVMap;

// Helper class for tests to unify the interface between
// BlockBuilder/TableBuilder and Block/Table.
class Constructor {
 public:
  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
  virtual ~Constructor() = default;

  void Add(const std::string& key, const Slice& value) {
    data_[key] = value.ToString();
  }

  // Finish constructing the data structure with all the keys that have
  // been added so far.  Returns the keys in sorted order in "*keys"
  // and stores the key/value pairs in "*kvmap"
  void Finish(const Options& options, std::vector<std::string>* keys,
              KVMap* kvmap) {
    *kvmap = data_;
    keys->clear();
    for (const auto& kvp : data_) {
      keys->push_back(kvp.first);
    }
    data_.clear();
    Status s = FinishImpl(options, *kvmap);
    ASSERT_TRUE(s.ok()) << s.ToString();
  }

  // Construct the data structure from the data in "data"
  virtual Status FinishImpl(const Options& options, const KVMap& data) = 0;

  virtual Iterator* NewIterator() const = 0;

  const KVMap& data() const { return data_; }

  virtual DB* db() const { return nullptr; }  // Overridden in DBConstructor

 private:
  KVMap data_;
};

class BlockConstructor : public Constructor {
 public:
  explicit BlockConstructor(const Comparator* cmp)
      : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
  ~BlockConstructor() override { delete block_; }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    delete block_;
    block_ = nullptr;
    BlockBuilder builder(&options);

    for (const auto& kvp : data) {
      builder.Add(kvp.first, kvp.second);
    }
    // Open the block
    data_ = builder.Finish().ToString();
    BlockContents contents;
    contents.data = data_;
    contents.cachable = false;
    contents.heap_allocated = false;
    block_ = new Block(contents);
    return Status::OK();
  }
  Iterator* NewIterator() const override {
    return block_->NewIterator(comparator_);
  }

 private:
  const Comparator* const comparator_;
  std::string data_;
  Block* block_;

  BlockConstructor();
};

class TableConstructor : public Constructor {
 public:
  TableConstructor(const Comparator* cmp)
      : Constructor(cmp), source_(nullptr), table_(nullptr) {}
  ~TableConstructor() override { Reset(); }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    Reset();
    StringSink sink;
    TableBuilder builder(options, &sink);

    for (const auto& kvp : data) {
      builder.Add(kvp.first, kvp.second);
      ASSERT_TRUE(builder.status().ok());
    }
    Status s = builder.Finish();
    ASSERT_TRUE(s.ok()) << s.ToString();

    ASSERT_EQ(sink.contents().size(), builder.FileSize());

    // Open the table
    source_ = new StringSource(sink.contents());
    Options table_options;
    table_options.comparator = options.comparator;
    return Table::Open(table_options, source_, sink.contents().size(), &table_);
  }

  Iterator* NewIterator() const override {
    return table_->NewIterator(ReadOptions());
  }

  uint64_t ApproximateOffsetOf(const Slice& key) const {
    return table_->ApproximateOffsetOf(key);
  }

 private:
  void Reset() {
    delete table_;
    delete source_;
    table_ = nullptr;
    source_ = nullptr;
  }

  StringSource* source_;
  Table* table_;

  TableConstructor();
};

// A helper class that converts internal format keys into user keys
class KeyConvertingIterator : public Iterator {
 public:
  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}

  KeyConvertingIterator(const KeyConvertingIterator&) = delete;
  KeyConvertingIterator& operator=(const KeyConvertingIterator&) = delete;

  ~KeyConvertingIterator() override { delete iter_; }

  bool Valid() const override { return iter_->Valid(); }
  void Seek(const Slice& target) override {
    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
    std::string encoded;
    AppendInternalKey(&encoded, ikey);
    iter_->Seek(encoded);
  }
  void SeekToFirst() override { iter_->SeekToFirst(); }
  void SeekToLast() override { iter_->SeekToLast(); }
  void Next() override { iter_->Next(); }
  void Prev() override { iter_->Prev(); }

  Slice key() const override {
    assert(Valid());
    ParsedInternalKey key;
    if (!ParseInternalKey(iter_->key(), &key)) {
      status_ = Status::Corruption("malformed internal key");
      return Slice("corrupted key");
    }
    return key.user_key;
  }

  Slice value() const override { return iter_->value(); }
  Status status() const override {
    return status_.ok() ? iter_->status() : status_;
  }

 private:
  mutable Status status_;
  Iterator* iter_;
};

class MemTableConstructor : public Constructor {
 public:
  explicit MemTableConstructor(const Comparator* cmp)
      : Constructor(cmp), internal_comparator_(cmp) {
    memtable_ = new MemTable(internal_comparator_);
    memtable_->Ref();
  }
  ~MemTableConstructor() override { memtable_->Unref(); }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    memtable_->Unref();
    memtable_ = new MemTable(internal_comparator_);
    memtable_->Ref();
    int seq = 1;
    for (const auto& kvp : data) {
      memtable_->Add(seq, kTypeValue, kvp.first, kvp.second);
      seq++;
    }
    return Status::OK();
  }
  Iterator* NewIterator() const override {
    return new KeyConvertingIterator(memtable_->NewIterator());
  }

 private:
  const InternalKeyComparator internal_comparator_;
  MemTable* memtable_;
};

class DBConstructor : public Constructor {
 public:
  explicit DBConstructor(const Comparator* cmp)
      : Constructor(cmp), comparator_(cmp) {
    db_ = nullptr;
    NewDB();
  }
  ~DBConstructor() override { delete db_; }
  Status FinishImpl(const Options& options, const KVMap& data) override {
    delete db_;
    db_ = nullptr;
    NewDB();
    for (const auto& kvp : data) {
      WriteBatch batch;
      batch.Put(kvp.first, kvp.second);
      ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
    }
    return Status::OK();
  }
  Iterator* NewIterator() const override {
    return db_->NewIterator(ReadOptions());
  }

  DB* db() const override { return db_; }

 private:
  void NewDB() {
    std::string name = test::TmpDir() + "/table_testdb";

    Options options;
    options.comparator = comparator_;
    Status status = DestroyDB(name, options);
    ASSERT_TRUE(status.ok()) << status.ToString();

    options.create_if_missing = true;
    options.error_if_exists = true;
    options.write_buffer_size = 10000;  // Something small to force merging
    status = DB::Open(options, name, &db_);
    ASSERT_TRUE(status.ok()) << status.ToString();
  }

  const Comparator* const comparator_;
  DB* db_;
};

enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };

struct TestArgs {
  TestType type;
  bool reverse_compare;
  int restart_interval;
};

static const TestArgs kTestArgList[] = {
    {TABLE_TEST, false, 16},
    {TABLE_TEST, false, 1},
    {TABLE_TEST, false, 1024},
    {TABLE_TEST, true, 16},
    {TABLE_TEST, true, 1},
    {TABLE_TEST, true, 1024},

    {BLOCK_TEST, false, 16},
    {BLOCK_TEST, false, 1},
    {BLOCK_TEST, false, 1024},
    {BLOCK_TEST, true, 16},
    {BLOCK_TEST, true, 1},
    {BLOCK_TEST, true, 1024},

    // Restart interval does not matter for memtables
    {MEMTABLE_TEST, false, 16},
    {MEMTABLE_TEST, true, 16},

    // Do not bother with restart interval variations for DB
    {DB_TEST, false, 16},
    {DB_TEST, true, 16},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);

class Harness {
 public:
  Harness() : constructor_(nullptr) {}

  void Init(const TestArgs& args) {
    delete constructor_;
    constructor_ = nullptr;
    options_ = Options();

    options_.block_restart_interval = args.restart_interval;
    // Use shorter block size for tests to exercise block boundary
    // conditions more.
    options_.block_size = 256;
    if (args.reverse_compare) {
      options_.comparator = &reverse_key_comparator;
    }
    switch (args.type) {
      case TABLE_TEST:
        constructor_ = new TableConstructor(options_.comparator);
        break;
      case BLOCK_TEST:
        constructor_ = new BlockConstructor(options_.comparator);
        break;
      case MEMTABLE_TEST:
        constructor_ = new MemTableConstructor(options_.comparator);
        break;
      case DB_TEST:
        constructor_ = new DBConstructor(options_.comparator);
        break;
    }
  }

  ~Harness() { delete constructor_; }

  void Add(const std::string& key, const std::string& value) {
    constructor_->Add(key, value);
  }

  void Test(Random* rnd) {
    std::vector<std::string> keys;
    KVMap data;
    constructor_->Finish(options_, &keys, &data);

    TestForwardScan(keys, data);
    TestBackwardScan(keys, data);
    TestRandomAccess(rnd, keys, data);
  }

  void TestForwardScan(const std::vector<std::string>& keys,
                       const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToFirst();
    for (KVMap::const_iterator model_iter = data.begin();
         model_iter != data.end(); ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  }

  void TestBackwardScan(const std::vector<std::string>& keys,
                        const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToLast();
    for (KVMap::const_reverse_iterator model_iter = data.rbegin();
         model_iter != data.rend(); ++model_iter) {
      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
      iter->Prev();
    }
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  }

  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
                        const KVMap& data) {
    static const bool kVerbose = false;
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    KVMap::const_iterator model_iter = data.begin();
    if (kVerbose) fprintf(stderr, "---\n");
    for (int i = 0; i < 200; i++) {
      const int toss = rnd->Uniform(5);
      switch (toss) {
        case 0: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Next\n");
            iter->Next();
            ++model_iter;
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 1: {
          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
          iter->SeekToFirst();
          model_iter = data.begin();
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 2: {
          std::string key = PickRandomKey(rnd, keys);
          model_iter = data.lower_bound(key);
          if (kVerbose)
            fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
          iter->Seek(Slice(key));
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }

        case 3: {
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Prev\n");
            iter->Prev();
            if (model_iter == data.begin()) {
              model_iter = data.end();  // Wrap around to invalid value
            } else {
              --model_iter;
            }
            ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          }
          break;
        }

        case 4: {
          if (kVerbose) fprintf(stderr, "SeekToLast\n");
          iter->SeekToLast();
          if (keys.empty()) {
            model_iter = data.end();
          } else {
            std::string last = data.rbegin()->first;
            model_iter = data.lower_bound(last);
          }
          ASSERT_EQ(ToString(data, model_iter), ToString(iter));
          break;
        }
      }
    }
    delete iter;
  }

  std::string ToString(const KVMap& data, const KVMap::const_iterator& it) {
    if (it == data.end()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const KVMap& data,
                       const KVMap::const_reverse_iterator& it) {
    if (it == data.rend()) {
      return "END";
    } else {
      return "'" + it->first + "->" + it->second + "'";
    }
  }

  std::string ToString(const Iterator* it) {
    if (!it->Valid()) {
      return "END";
    } else {
      return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
    }
  }

  std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
    if (keys.empty()) {
      return "foo";
    } else {
      const int index = rnd->Uniform(keys.size());
      std::string result = keys[index];
      switch (rnd->Uniform(3)) {
        case 0:
          // Return an existing key
          break;
        case 1: {
          // Attempt to return something smaller than an existing key
          if (!result.empty() && result[result.size() - 1] > '\0') {
            result[result.size() - 1]--;
          }
          break;
        }
        case 2: {
          // Return something larger than an existing key
          Increment(options_.comparator, &result);
          break;
        }
      }
      return result;
    }
  }

  // Returns nullptr if not running against a DB
  DB* db() const { return constructor_->db(); }

 private:
  Options options_;
  Constructor* constructor_;
};

// Test empty table/block.
TEST(Harness, Empty) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 1);
    Test(&rnd);
  }
}

// Special test for a block with no restart entries.  The C++ leveldb
// code never generates such blocks, but the Java version of leveldb
// seems to.
TEST(Harness, ZeroRestartPointsInBlock) {
  char data[sizeof(uint32_t)];
  memset(data, 0, sizeof(data));
  BlockContents contents;
  contents.data = Slice(data, sizeof(data));
  contents.cachable = false;
  contents.heap_allocated = false;
  Block block(contents);
  Iterator* iter = block.NewIterator(BytewiseComparator());
  iter->SeekToFirst();
  ASSERT_TRUE(!iter->Valid());
  iter->SeekToLast();
  ASSERT_TRUE(!iter->Valid());
  iter->Seek("foo");
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}

// Test the empty key
TEST(Harness, SimpleEmptyKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 1);
    Add("", "v");
    Test(&rnd);
  }
}

TEST(Harness, SimpleSingle) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 2);
    Add("abc", "v");
    Test(&rnd);
  }
}

TEST(Harness, SimpleMulti) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 3);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}

TEST(Harness, SimpleSpecialKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 4);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}

TEST(Harness, Randomized) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    Random rnd(test::RandomSeed() + 5);
    for (int num_entries = 0; num_entries < 2000;
         num_entries += (num_entries < 50 ? 1 : 200)) {
      if ((num_entries % 10) == 0) {
        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
                int(kNumTestArgs), num_entries);
      }
      for (int e = 0; e < num_entries; e++) {
        std::string v;
        Add(test::RandomKey(&rnd, rnd.Skewed(4)),
            test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
      }
      Test(&rnd);
    }
  }
}

TEST(Harness, RandomizedLongDB) {
  Random rnd(test::RandomSeed());
  TestArgs args = {DB_TEST, false, 16};
  Init(args);
  int num_entries = 100000;
  for (int e = 0; e < num_entries; e++) {
    std::string v;
    Add(test::RandomKey(&rnd, rnd.Skewed(4)),
        test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
  }
  Test(&rnd);

  // We must have created enough data to force merging
  int files = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    std::string value;
    char name[100];
    snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
    ASSERT_TRUE(db()->GetProperty(name, &value));
    files += atoi(value.c_str());
  }
  ASSERT_GT(files, 0);
}

class MemTableTest {};

TEST(MemTableTest, Simple) {
  InternalKeyComparator cmp(BytewiseComparator());
  MemTable* memtable = new MemTable(cmp);
  memtable->Ref();
  WriteBatch batch;
  WriteBatchInternal::SetSequence(&batch, 100);
  batch.Put(std::string("k1"), std::string("v1"));
  batch.Put(std::string("k2"), std::string("v2"));
  batch.Put(std::string("k3"), std::string("v3"));
  batch.Put(std::string("largekey"), std::string("vlarge"));
  ASSERT_TRUE(WriteBatchInternal::InsertInto(&batch, memtable).ok());

  Iterator* iter = memtable->NewIterator();
  iter->SeekToFirst();
  while (iter->Valid()) {
    fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
            iter->value().ToString().c_str());
    iter->Next();
  }

  delete iter;
  memtable->Unref();
}

static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  bool result = (val >= low) && (val <= high);
  if (!result) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            (unsigned long long)(val), (unsigned long long)(low),
            (unsigned long long)(high));
  }
  return result;
}

class TableTest {};

TEST(TableTest, ApproximateOffsetOfPlain) {
  TableConstructor c(BytewiseComparator());
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", std::string(10000, 'x'));
  c.Add("k04", std::string(200000, 'x'));
  c.Add("k05", std::string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", std::string(100000, 'x'));
  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kNoCompression;
  c.Finish(options, &keys, &kvmap);

  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}

static bool SnappyCompressionSupported() {
  std::string out;
  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
  return port::Snappy_Compress(in.data(), in.size(), &out);
}

TEST(TableTest, ApproximateOffsetOfCompressed) {
  if (!SnappyCompressionSupported()) {
    fprintf(stderr, "skipping compression tests\n");
    return;
  }

  Random rnd(301);
  TableConstructor c(BytewiseComparator());
  std::string tmp;
  c.Add("k01", "hello");
  c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  c.Add("k03", "hello3");
  c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  std::vector<std::string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kSnappyCompression;
  c.Finish(options, &keys, &kvmap);

  // Expected upper and lower bounds of space used by compressible strings.
  static const int kSlop = 1000;  // Compressor effectiveness varies.
  const int expected = 2500;      // 10000 * compression ratio (0.25)
  const int min_z = expected - kSlop;
  const int max_z = expected + kSlop;

  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, kSlop));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, kSlop));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, kSlop));
  // Have now emitted a large compressible string, so adjust expected offset.
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), min_z, max_z));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), min_z, max_z));
  // Have now emitted two large compressible strings, so adjust expected
  // offset.
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 2 * min_z, 2 * max_z));
}

}  // namespace leveldb

int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
bitcoin/src/leveldb
bitcoin/src/leveldb/table/two_level_iterator.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/two_level_iterator.h"

#include "leveldb/table.h"
#include "table/block.h"
#include "table/format.h"
#include "table/iterator_wrapper.h"

namespace leveldb {

namespace {

typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);

class TwoLevelIterator : public Iterator {
 public:
  TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
                   void* arg, const ReadOptions& options);

  ~TwoLevelIterator() override;

  void Seek(const Slice& target) override;
  void SeekToFirst() override;
  void SeekToLast() override;
  void Next() override;
  void Prev() override;

  bool Valid() const override { return data_iter_.Valid(); }
  Slice key() const override {
    assert(Valid());
    return data_iter_.key();
  }
  Slice value() const override {
    assert(Valid());
    return data_iter_.value();
  }
  Status status() const override {
    // It'd be nice if status() returned a const Status& instead of a Status
    if (!index_iter_.status().ok()) {
      return index_iter_.status();
    } else if (data_iter_.iter() != nullptr && !data_iter_.status().ok()) {
      return data_iter_.status();
    } else {
      return status_;
    }
  }

 private:
  void SaveError(const Status& s) {
    if (status_.ok() && !s.ok()) status_ = s;
  }
  void SkipEmptyDataBlocksForward();
  void SkipEmptyDataBlocksBackward();
  void SetDataIterator(Iterator* data_iter);
  void InitDataBlock();

  BlockFunction block_function_;
  void* arg_;
  const ReadOptions options_;
  Status status_;
  IteratorWrapper index_iter_;
  IteratorWrapper data_iter_;  // May be nullptr
  // If data_iter_ is non-null, then "data_block_handle_" holds the
  // "index_value" passed to block_function_ to create the data_iter_.
  std::string data_block_handle_;
};

TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
                                   BlockFunction block_function, void* arg,
                                   const ReadOptions& options)
    : block_function_(block_function),
      arg_(arg),
      options_(options),
      index_iter_(index_iter),
      data_iter_(nullptr) {}

TwoLevelIterator::~TwoLevelIterator() = default;

void TwoLevelIterator::Seek(const Slice& target) {
  index_iter_.Seek(target);
  InitDataBlock();
  if (data_iter_.iter() != nullptr) data_iter_.Seek(target);
  SkipEmptyDataBlocksForward();
}

void TwoLevelIterator::SeekToFirst() {
  index_iter_.SeekToFirst();
  InitDataBlock();
  if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
  SkipEmptyDataBlocksForward();
}

void TwoLevelIterator::SeekToLast() {
  index_iter_.SeekToLast();
  InitDataBlock();
  if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
  SkipEmptyDataBlocksBackward();
}

void TwoLevelIterator::Next() {
  assert(Valid());
  data_iter_.Next();
  SkipEmptyDataBlocksForward();
}

void TwoLevelIterator::Prev() {
  assert(Valid());
  data_iter_.Prev();
  SkipEmptyDataBlocksBackward();
}

void TwoLevelIterator::SkipEmptyDataBlocksForward() {
  while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
    // Move to next block
    if (!index_iter_.Valid()) {
      SetDataIterator(nullptr);
      return;
    }
    index_iter_.Next();
    InitDataBlock();
    if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst();
  }
}

void TwoLevelIterator::SkipEmptyDataBlocksBackward() {
  while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
    // Move to next block
    if (!index_iter_.Valid()) {
      SetDataIterator(nullptr);
      return;
    }
    index_iter_.Prev();
    InitDataBlock();
    if (data_iter_.iter() != nullptr) data_iter_.SeekToLast();
  }
}

void TwoLevelIterator::SetDataIterator(Iterator* data_iter) {
  if (data_iter_.iter() != nullptr) SaveError(data_iter_.status());
  data_iter_.Set(data_iter);
}

void TwoLevelIterator::InitDataBlock() {
  if (!index_iter_.Valid()) {
    SetDataIterator(nullptr);
  } else {
    Slice handle = index_iter_.value();
    if (data_iter_.iter() != nullptr &&
        handle.compare(data_block_handle_) == 0) {
      // data_iter_ is already constructed with this iterator, so
      // no need to change anything
    } else {
      Iterator* iter = (*block_function_)(arg_, options_, handle);
      data_block_handle_.assign(handle.data(), handle.size());
      SetDataIterator(iter);
    }
  }
}

}  // namespace

Iterator* NewTwoLevelIterator(Iterator* index_iter,
                              BlockFunction block_function, void* arg,
                              const ReadOptions& options) {
  return new TwoLevelIterator(index_iter, block_function, arg, options);
}

}  // namespace leveldb
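To make the block_function contract concrete, here is a toy, self-contained sketch; this is not how Table wires things up internally (there the index values are encoded BlockHandles). The "handles" here are block names in a global map, and Iterator::RegisterCleanup() frees each Block together with its iterator:

#include <cstdio>
#include <map>
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/options.h"
#include "table/block.h"
#include "table/block_builder.h"
#include "table/format.h"
#include "table/two_level_iterator.h"

using namespace leveldb;

namespace {

std::map<std::string, std::string> g_blocks;  // "handle" -> serialized block

std::string BuildBlock(const Options* options,
                       const std::map<std::string, std::string>& kvs) {
  BlockBuilder builder(options);
  for (const auto& kv : kvs) builder.Add(kv.first, kv.second);
  return builder.Finish().ToString();
}

void DeleteBlock(void* arg, void*) { delete reinterpret_cast<Block*>(arg); }

// The block_function: turns an index value (here a block name) into an
// iterator over the contents of the corresponding block.
Iterator* OpenBlock(void* /*arg*/, const ReadOptions& /*options*/,
                    const Slice& index_value) {
  BlockContents contents;
  contents.data = Slice(g_blocks[index_value.ToString()]);
  contents.cachable = false;
  contents.heap_allocated = false;
  Block* block = new Block(contents);
  Iterator* iter = block->NewIterator(BytewiseComparator());
  iter->RegisterCleanup(DeleteBlock, block, nullptr);
  return iter;
}

}  // namespace

int main() {
  Options options;
  g_blocks["b0"] = BuildBlock(&options, {{"a", "1"}, {"b", "2"}});
  g_blocks["b1"] = BuildBlock(&options, {{"c", "3"}, {"d", "4"}});

  // Index block: key = last key of each data block, value = its "handle".
  std::string index = BuildBlock(&options, {{"b", "b0"}, {"d", "b1"}});
  BlockContents contents;
  contents.data = Slice(index);
  contents.cachable = false;
  contents.heap_allocated = false;
  Block index_block(contents);

  Iterator* it = NewTwoLevelIterator(
      index_block.NewIterator(BytewiseComparator()), &OpenBlock, nullptr,
      ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    printf("%s -> %s\n", it->key().ToString().c_str(),
           it->value().ToString().c_str());  // prints a..d in order
  }
  delete it;  // also deletes the index iterator it took ownership of
  return 0;
}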
bitcoin/src/leveldb
bitcoin/src/leveldb/benchmarks/db_bench_sqlite3.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <sqlite3.h> #include <stdio.h> #include <stdlib.h> #include "util/histogram.h" #include "util/random.h" #include "util/testutil.h" // Comma-separated list of operations to run in the specified order // Actual benchmarks: // // fillseq -- write N values in sequential key order in async mode // fillseqsync -- write N/100 values in sequential key order in sync mode // fillseqbatch -- batch write N values in sequential key order in async mode // fillrandom -- write N values in random key order in async mode // fillrandsync -- write N/100 values in random key order in sync mode // fillrandbatch -- batch write N values in sequential key order in async mode // overwrite -- overwrite N values in random key order in async mode // fillrand100K -- write N/1000 100K values in random order in async mode // fillseq100K -- write N/1000 100K values in sequential order in async mode // readseq -- read N times sequentially // readrandom -- read N times in random order // readrand100K -- read N/1000 100K values in sequential order in async mode static const char* FLAGS_benchmarks = "fillseq," "fillseqsync," "fillseqbatch," "fillrandom," "fillrandsync," "fillrandbatch," "overwrite," "overwritebatch," "readrandom," "readseq," "fillrand100K," "fillseq100K," "readseq," "readrand100K,"; // Number of key/values to place in database static int FLAGS_num = 1000000; // Number of read operations to do. If negative, do FLAGS_num reads. static int FLAGS_reads = -1; // Size of each value static int FLAGS_value_size = 100; // Print histogram of operation timings static bool FLAGS_histogram = false; // Arrange to generate values that shrink to this fraction of // their original size after compression static double FLAGS_compression_ratio = 0.5; // Page size. Default 1 KB. static int FLAGS_page_size = 1024; // Number of pages. // Default cache size = FLAGS_page_size * FLAGS_num_pages = 4 MB. static int FLAGS_num_pages = 4096; // If true, do not destroy the existing database. If you set this // flag and also specify a benchmark that wants a fresh database, that // benchmark will fail. static bool FLAGS_use_existing_db = false; // If true, we allow batch writes to occur static bool FLAGS_transaction = true; // If true, we enable Write-Ahead Logging static bool FLAGS_WAL_enabled = true; // Use the db with the following name. static const char* FLAGS_db = nullptr; inline static void ExecErrorCheck(int status, char* err_msg) { if (status != SQLITE_OK) { fprintf(stderr, "SQL error: %s\n", err_msg); sqlite3_free(err_msg); exit(1); } } inline static void StepErrorCheck(int status) { if (status != SQLITE_DONE) { fprintf(stderr, "SQL step error: status = %d\n", status); exit(1); } } inline static void ErrorCheck(int status) { if (status != SQLITE_OK) { fprintf(stderr, "sqlite3 error: status = %d\n", status); exit(1); } } inline static void WalCheckpoint(sqlite3* db_) { // Flush all writes to disk if (FLAGS_WAL_enabled) { sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr, nullptr); } } namespace leveldb { // Helper for quickly generating random data. 
namespace { class RandomGenerator { private: std::string data_; int pos_; public: RandomGenerator() { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd(301); std::string piece; while (data_.size() < 1048576) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece); data_.append(piece); } pos_ = 0; } Slice Generate(int len) { if (pos_ + len > data_.size()) { pos_ = 0; assert(len < data_.size()); } pos_ += len; return Slice(data_.data() + pos_ - len, len); } }; static Slice TrimSpace(Slice s) { int start = 0; while (start < s.size() && isspace(s[start])) { start++; } int limit = s.size(); while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); } } // namespace class Benchmark { private: sqlite3* db_; int db_num_; int num_; int reads_; double start_; double last_op_finish_; int64_t bytes_; std::string message_; Histogram hist_; RandomGenerator gen_; Random rand_; // State kept for progress messages int done_; int next_report_; // When to report next void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); fprintf(stdout, "Keys: %d bytes each\n", kKeySize); fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) fprintf( stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION); #if defined(__linux) time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); Slice val = TrimSpace(Slice(sep + 1)); if (key == "model name") { ++num_cpus; cpu_type = val.ToString(); } else if (key == "cache size") { cache_size = val.ToString(); } } fclose(cpuinfo); fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } void Start() { start_ = Env::Default()->NowMicros() * 1e-6; bytes_ = 0; message_.clear(); last_op_finish_ = start_; hist_.Clear(); done_ = 0; next_report_ = 100; } void FinishedSingleOp() { if (FLAGS_histogram) { double now = Env::Default()->NowMicros() * 1e-6; double micros = (now - last_op_finish_) * 1e6; hist_.Add(micros); if (micros > 20000) { fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); fflush(stderr); } last_op_finish_ = now; } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) next_report_ += 100; else if (next_report_ < 5000) next_report_ += 500; else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 50000) next_report_ += 5000; else if (next_report_ < 
100000) next_report_ += 10000; else if (next_report_ < 500000) next_report_ += 50000; else next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } void Stop(const Slice& name) { double finish = Env::Default()->NowMicros() * 1e-6; // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; if (bytes_ > 0) { char rate[100]; snprintf(rate, sizeof(rate), "%6.1f MB/s", (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { message_ = std::string(rate) + " " + message_; } else { message_ = rate; } } fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), message_.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); } fflush(stdout); } public: enum Order { SEQUENTIAL, RANDOM }; enum DBState { FRESH, EXISTING }; Benchmark() : db_(nullptr), db_num_(0), num_(FLAGS_num), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), bytes_(0), rand_(301) { std::vector<std::string> files; std::string test_dir; Env::Default()->GetTestDirectory(&test_dir); Env::Default()->GetChildren(test_dir, &files); if (!FLAGS_use_existing_db) { for (int i = 0; i < files.size(); i++) { if (Slice(files[i]).starts_with("dbbench_sqlite3")) { std::string file_name(test_dir); file_name += "/"; file_name += files[i]; Env::Default()->DeleteFile(file_name.c_str()); } } } } ~Benchmark() { int status = sqlite3_close(db_); ErrorCheck(status); } void Run() { PrintHeader(); Open(); const char* benchmarks = FLAGS_benchmarks; while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; if (sep == nullptr) { name = benchmarks; benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; } bytes_ = 0; Start(); bool known = true; bool write_sync = false; if (name == Slice("fillseq")) { Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillseqbatch")) { Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1000); WalCheckpoint(db_); } else if (name == Slice("fillrandom")) { Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillrandbatch")) { Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1000); WalCheckpoint(db_); } else if (name == Slice("overwrite")) { Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("overwritebatch")) { Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1000); WalCheckpoint(db_); } else if (name == Slice("fillrandsync")) { write_sync = true; Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillseqsync")) { write_sync = true; Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1); WalCheckpoint(db_); } else if (name == Slice("fillrand100K")) { Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1); WalCheckpoint(db_); } else if (name == Slice("fillseq100K")) { Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1); WalCheckpoint(db_); } else if (name == Slice("readseq")) { ReadSequential(); } else if (name == Slice("readrandom")) { Read(RANDOM, 1); } else if (name == Slice("readrand100K")) { int n = reads_; reads_ /= 1000; Read(RANDOM, 1); reads_ = n; } else { known = false; if (name != Slice()) { // No 
error message for empty name fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); } } if (known) { Stop(name); } } } void Open() { assert(db_ == nullptr); int status; char file_name[100]; char* err_msg = nullptr; db_num_++; // Open database std::string tmp_dir; Env::Default()->GetTestDirectory(&tmp_dir); snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db", tmp_dir.c_str(), db_num_); status = sqlite3_open(file_name, &db_); if (status) { fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_)); exit(1); } // Change SQLite cache size char cache_size[100]; snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d", FLAGS_num_pages); status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); // FLAGS_page_size is defaulted to 1024 if (FLAGS_page_size != 1024) { char page_size[100]; snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d", FLAGS_page_size); status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } // Change journal mode to WAL if WAL enabled flag is on if (FLAGS_WAL_enabled) { std::string WAL_stmt = "PRAGMA journal_mode = WAL"; // LevelDB's default cache size is a combined 4 MB std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096"; status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); status = sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } // Change locking mode to exclusive and create tables/index for database std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE"; std::string create_stmt = "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))"; std::string stmt_array[] = {locking_stmt, create_stmt}; int stmt_array_length = sizeof(stmt_array) / sizeof(std::string); for (int i = 0; i < stmt_array_length; i++) { status = sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } } void Write(bool write_sync, Order order, DBState state, int num_entries, int value_size, int entries_per_batch) { // Create new database if state == FRESH if (state == FRESH) { if (FLAGS_use_existing_db) { message_ = "skipping (--use_existing_db is true)"; return; } sqlite3_close(db_); db_ = nullptr; Open(); Start(); } if (num_entries != num_) { char msg[100]; snprintf(msg, sizeof(msg), "(%d ops)", num_entries); message_ = msg; } char* err_msg = nullptr; int status; sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt; std::string replace_str = "REPLACE INTO test (key, value) VALUES (?, ?)"; std::string begin_trans_str = "BEGIN TRANSACTION;"; std::string end_trans_str = "END TRANSACTION;"; // Check for synchronous flag in options std::string sync_stmt = (write_sync) ? 
"PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF"; status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); // Preparing sqlite3 statements status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt, nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, &begin_trans_stmt, nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt, nullptr); ErrorCheck(status); bool transaction = (entries_per_batch > 1); for (int i = 0; i < num_entries; i += entries_per_batch) { // Begin write transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(begin_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(begin_trans_stmt); ErrorCheck(status); } // Create and execute SQL statements for (int j = 0; j < entries_per_batch; j++) { const char* value = gen_.Generate(value_size).data(); // Create values for key-value pair const int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries); char key[100]; snprintf(key, sizeof(key), "%016d", k); // Bind KV values into replace_stmt status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC); ErrorCheck(status); status = sqlite3_bind_blob(replace_stmt, 2, value, value_size, SQLITE_STATIC); ErrorCheck(status); // Execute replace_stmt bytes_ += value_size + strlen(key); status = sqlite3_step(replace_stmt); StepErrorCheck(status); // Reset SQLite statement for another use status = sqlite3_clear_bindings(replace_stmt); ErrorCheck(status); status = sqlite3_reset(replace_stmt); ErrorCheck(status); FinishedSingleOp(); } // End write transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(end_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(end_trans_stmt); ErrorCheck(status); } } status = sqlite3_finalize(replace_stmt); ErrorCheck(status); status = sqlite3_finalize(begin_trans_stmt); ErrorCheck(status); status = sqlite3_finalize(end_trans_stmt); ErrorCheck(status); } void Read(Order order, int entries_per_batch) { int status; sqlite3_stmt *read_stmt, *begin_trans_stmt, *end_trans_stmt; std::string read_str = "SELECT * FROM test WHERE key = ?"; std::string begin_trans_str = "BEGIN TRANSACTION;"; std::string end_trans_str = "END TRANSACTION;"; // Preparing sqlite3 statements status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, &begin_trans_stmt, nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt, nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr); ErrorCheck(status); bool transaction = (entries_per_batch > 1); for (int i = 0; i < reads_; i += entries_per_batch) { // Begin read transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(begin_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(begin_trans_stmt); ErrorCheck(status); } // Create and execute SQL statements for (int j = 0; j < entries_per_batch; j++) { // Create key value char key[100]; int k = (order == SEQUENTIAL) ? 
i + j : (rand_.Next() % reads_); snprintf(key, sizeof(key), "%016d", k); // Bind key value into read_stmt status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC); ErrorCheck(status); // Execute read statement while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) { } StepErrorCheck(status); // Reset SQLite statement for another use status = sqlite3_clear_bindings(read_stmt); ErrorCheck(status); status = sqlite3_reset(read_stmt); ErrorCheck(status); FinishedSingleOp(); } // End read transaction if (FLAGS_transaction && transaction) { status = sqlite3_step(end_trans_stmt); StepErrorCheck(status); status = sqlite3_reset(end_trans_stmt); ErrorCheck(status); } } status = sqlite3_finalize(read_stmt); ErrorCheck(status); status = sqlite3_finalize(begin_trans_stmt); ErrorCheck(status); status = sqlite3_finalize(end_trans_stmt); ErrorCheck(status); } void ReadSequential() { int status; sqlite3_stmt* pStmt; std::string read_str = "SELECT * FROM test ORDER BY key"; status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr); ErrorCheck(status); for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) { bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2); FinishedSingleOp(); } status = sqlite3_finalize(pStmt); ErrorCheck(status); } }; } // namespace leveldb int main(int argc, char** argv) { std::string default_db_path; for (int i = 1; i < argc; i++) { double d; int n; char junk; if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) { FLAGS_benchmarks = argv[i] + strlen("--benchmarks="); } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) { FLAGS_compression_ratio = d; } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_use_existing_db = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { FLAGS_reads = n; } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) { FLAGS_value_size = n; } else if (leveldb::Slice(argv[i]) == leveldb::Slice("--no_transaction")) { FLAGS_transaction = false; } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) { FLAGS_page_size = n; } else if (sscanf(argv[i], "--num_pages=%d%c", &n, &junk) == 1) { FLAGS_num_pages = n; } else if (sscanf(argv[i], "--WAL_enabled=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_WAL_enabled = n; } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else { fprintf(stderr, "Invalid flag '%s'\n", argv[i]); exit(1); } } // Choose a location for the test database if none given with --db=<path> if (FLAGS_db == nullptr) { leveldb::Env::Default()->GetTestDirectory(&default_db_path); default_db_path += "/dbbench"; FLAGS_db = default_db_path.c_str(); } leveldb::Benchmark benchmark; benchmark.Run(); return 0; }
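// --- Illustrative sketch (not part of the original file) ---
// A minimal standalone version of the prepared-statement cycle that
// Write() above relies on: prepare a REPLACE once, then bind/step/
// clear_bindings/reset per row. The in-memory database and the literal
// "value" payload are assumptions for illustration only.
#include <sqlite3.h>
#include <stdio.h>

static void ReplaceCycleSketch() {
  sqlite3* db = nullptr;
  if (sqlite3_open(":memory:", &db) != SQLITE_OK) {
    fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db));
    return;
  }
  sqlite3_exec(db,
               "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))",
               nullptr, nullptr, nullptr);
  sqlite3_stmt* stmt = nullptr;
  sqlite3_prepare_v2(db, "REPLACE INTO test (key, value) VALUES (?, ?)", -1,
                     &stmt, nullptr);
  for (int k = 0; k < 3; k++) {
    char key[17];
    snprintf(key, sizeof(key), "%016d", k);  // same fixed-width keys as above
    sqlite3_bind_blob(stmt, 1, key, 16, SQLITE_STATIC);
    sqlite3_bind_blob(stmt, 2, "value", 5, SQLITE_STATIC);
    sqlite3_step(stmt);            // executes the REPLACE
    sqlite3_clear_bindings(stmt);  // statement is now reusable
    sqlite3_reset(stmt);
  }
  sqlite3_finalize(stmt);
  sqlite3_close(db);
}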
0
bitcoin/src/leveldb
bitcoin/src/leveldb/benchmarks/db_bench.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include "leveldb/cache.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "leveldb/filter_policy.h" #include "leveldb/write_batch.h" #include "port/port.h" #include "util/crc32c.h" #include "util/histogram.h" #include "util/mutexlock.h" #include "util/random.h" #include "util/testutil.h" // Comma-separated list of operations to run in the specified order // Actual benchmarks: // fillseq -- write N values in sequential key order in async mode // fillrandom -- write N values in random key order in async mode // overwrite -- overwrite N values in random key order in async mode // fillsync -- write N/100 values in random key order in sync mode // fill100K -- write N/1000 100K values in random order in async mode // deleteseq -- delete N keys in sequential order // deleterandom -- delete N keys in random order // readseq -- read N times sequentially // readreverse -- read N times in reverse order // readrandom -- read N times in random order // readmissing -- read N missing keys in random order // readhot -- read N times in random order from 1% section of DB // seekrandom -- N random seeks // open -- cost of opening a DB // crc32c -- repeated crc32c of 4K of data // Meta operations: // compact -- Compact the entire DB // stats -- Print DB stats // sstables -- Print sstable info // heapprofile -- Dump a heap profile (if supported by this port) static const char* FLAGS_benchmarks = "fillseq," "fillsync," "fillrandom," "overwrite," "readrandom," "readrandom," // Extra run to allow previous compactions to quiesce "readseq," "readreverse," "compact," "readrandom," "readseq," "readreverse," "fill100K," "crc32c," "snappycomp," "snappyuncomp,"; // Number of key/values to place in database static int FLAGS_num = 1000000; // Number of read operations to do. If negative, do FLAGS_num reads. static int FLAGS_reads = -1; // Number of concurrent threads to run. static int FLAGS_threads = 1; // Size of each value static int FLAGS_value_size = 100; // Arrange to generate values that shrink to this fraction of // their original size after compression static double FLAGS_compression_ratio = 0.5; // Print histogram of operation timings static bool FLAGS_histogram = false; // Number of bytes to buffer in memtable before compacting // (initialized to default value by "main") static int FLAGS_write_buffer_size = 0; // Number of bytes written to each file. // (initialized to default value by "main") static int FLAGS_max_file_size = 0; // Approximate size of user data packed per block (before compression. // (initialized to default value by "main") static int FLAGS_block_size = 0; // Number of bytes to use as a cache of uncompressed data. // Negative means use default settings. static int FLAGS_cache_size = -1; // Maximum number of files to keep open at the same time (use default if == 0) static int FLAGS_open_files = 0; // Bloom filter bits per key. // Negative means use default settings. static int FLAGS_bloom_bits = -1; // If true, do not destroy the existing database. If you set this // flag and also specify a benchmark that wants a fresh database, that // benchmark will fail. static bool FLAGS_use_existing_db = false; // If true, reuse existing log/MANIFEST files when re-opening a database. 
static bool FLAGS_reuse_logs = false; // Use the db with the following name. static const char* FLAGS_db = nullptr; namespace leveldb { namespace { leveldb::Env* g_env = nullptr; // Helper for quickly generating random data. class RandomGenerator { private: std::string data_; int pos_; public: RandomGenerator() { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd(301); std::string piece; while (data_.size() < 1048576) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece); data_.append(piece); } pos_ = 0; } Slice Generate(size_t len) { if (pos_ + len > data_.size()) { pos_ = 0; assert(len < data_.size()); } pos_ += len; return Slice(data_.data() + pos_ - len, len); } }; #if defined(__linux) static Slice TrimSpace(Slice s) { size_t start = 0; while (start < s.size() && isspace(s[start])) { start++; } size_t limit = s.size(); while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); } #endif static void AppendWithSpace(std::string* str, Slice msg) { if (msg.empty()) return; if (!str->empty()) { str->push_back(' '); } str->append(msg.data(), msg.size()); } class Stats { private: double start_; double finish_; double seconds_; int done_; int next_report_; int64_t bytes_; double last_op_finish_; Histogram hist_; std::string message_; public: Stats() { Start(); } void Start() { next_report_ = 100; hist_.Clear(); done_ = 0; bytes_ = 0; seconds_ = 0; message_.clear(); start_ = finish_ = last_op_finish_ = g_env->NowMicros(); } void Merge(const Stats& other) { hist_.Merge(other.hist_); done_ += other.done_; bytes_ += other.bytes_; seconds_ += other.seconds_; if (other.start_ < start_) start_ = other.start_; if (other.finish_ > finish_) finish_ = other.finish_; // Just keep the messages from one thread if (message_.empty()) message_ = other.message_; } void Stop() { finish_ = g_env->NowMicros(); seconds_ = (finish_ - start_) * 1e-6; } void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); } void FinishedSingleOp() { if (FLAGS_histogram) { double now = g_env->NowMicros(); double micros = now - last_op_finish_; hist_.Add(micros); if (micros > 20000) { fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); fflush(stderr); } last_op_finish_ = now; } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) next_report_ += 100; else if (next_report_ < 5000) next_report_ += 500; else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 50000) next_report_ += 5000; else if (next_report_ < 100000) next_report_ += 10000; else if (next_report_ < 500000) next_report_ += 50000; else next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } void AddBytes(int64_t n) { bytes_ += n; } void Report(const Slice& name) { // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; std::string extra; if (bytes_ > 0) { // Rate is computed on actual elapsed time, not the sum of per-thread // elapsed times. 
double elapsed = (finish_ - start_) * 1e-6; char rate[100]; snprintf(rate, sizeof(rate), "%6.1f MB/s", (bytes_ / 1048576.0) / elapsed); extra = rate; } AppendWithSpace(&extra, message_); fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); } fflush(stdout); } }; // State shared by all concurrent executions of the same benchmark. struct SharedState { port::Mutex mu; port::CondVar cv GUARDED_BY(mu); int total GUARDED_BY(mu); // Each thread goes through the following states: // (1) initializing // (2) waiting for others to be initialized // (3) running // (4) done int num_initialized GUARDED_BY(mu); int num_done GUARDED_BY(mu); bool start GUARDED_BY(mu); SharedState(int total) : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {} }; // Per-thread state for concurrent executions of the same benchmark. struct ThreadState { int tid; // 0..n-1 when running in n threads Random rand; // Has different seeds for different threads Stats stats; SharedState* shared; ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {} }; } // namespace class Benchmark { private: Cache* cache_; const FilterPolicy* filter_policy_; DB* db_; int num_; int value_size_; int entries_per_batch_; WriteOptions write_options_; int reads_; int heap_counter_; void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); fprintf(stdout, "Keys: %d bytes each\n", kKeySize); fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", FLAGS_value_size, static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / 1048576.0)); fprintf(stdout, "FileSize: %.1f MB (estimated)\n", (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) fprintf( stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif // See if snappy is working by attempting to compress a compressible string const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; std::string compressed; if (!port::Snappy_Compress(text, sizeof(text), &compressed)) { fprintf(stdout, "WARNING: Snappy compression is not enabled\n"); } else if (compressed.size() >= sizeof(text)) { fprintf(stdout, "WARNING: Snappy compression is not effective\n"); } } void PrintEnvironment() { fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion, kMinorVersion); #if defined(__linux) time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); Slice val = TrimSpace(Slice(sep + 1)); if (key == "model name") { ++num_cpus; cpu_type = val.ToString(); } else if (key == "cache size") { cache_size = val.ToString(); } } fclose(cpuinfo); 
fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } public: Benchmark() : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr), filter_policy_(FLAGS_bloom_bits >= 0 ? NewBloomFilterPolicy(FLAGS_bloom_bits) : nullptr), db_(nullptr), num_(FLAGS_num), value_size_(FLAGS_value_size), entries_per_batch_(1), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), heap_counter_(0) { std::vector<std::string> files; g_env->GetChildren(FLAGS_db, &files); for (size_t i = 0; i < files.size(); i++) { if (Slice(files[i]).starts_with("heap-")) { g_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]); } } if (!FLAGS_use_existing_db) { DestroyDB(FLAGS_db, Options()); } } ~Benchmark() { delete db_; delete cache_; delete filter_policy_; } void Run() { PrintHeader(); Open(); const char* benchmarks = FLAGS_benchmarks; while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; if (sep == nullptr) { name = benchmarks; benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; } // Reset parameters that may be overridden below num_ = FLAGS_num; reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads); value_size_ = FLAGS_value_size; entries_per_batch_ = 1; write_options_ = WriteOptions(); void (Benchmark::*method)(ThreadState*) = nullptr; bool fresh_db = false; int num_threads = FLAGS_threads; if (name == Slice("open")) { method = &Benchmark::OpenBench; num_ /= 10000; if (num_ < 1) num_ = 1; } else if (name == Slice("fillseq")) { fresh_db = true; method = &Benchmark::WriteSeq; } else if (name == Slice("fillbatch")) { fresh_db = true; entries_per_batch_ = 1000; method = &Benchmark::WriteSeq; } else if (name == Slice("fillrandom")) { fresh_db = true; method = &Benchmark::WriteRandom; } else if (name == Slice("overwrite")) { fresh_db = false; method = &Benchmark::WriteRandom; } else if (name == Slice("fillsync")) { fresh_db = true; num_ /= 1000; write_options_.sync = true; method = &Benchmark::WriteRandom; } else if (name == Slice("fill100K")) { fresh_db = true; num_ /= 1000; value_size_ = 100 * 1000; method = &Benchmark::WriteRandom; } else if (name == Slice("readseq")) { method = &Benchmark::ReadSequential; } else if (name == Slice("readreverse")) { method = &Benchmark::ReadReverse; } else if (name == Slice("readrandom")) { method = &Benchmark::ReadRandom; } else if (name == Slice("readmissing")) { method = &Benchmark::ReadMissing; } else if (name == Slice("seekrandom")) { method = &Benchmark::SeekRandom; } else if (name == Slice("readhot")) { method = &Benchmark::ReadHot; } else if (name == Slice("readrandomsmall")) { reads_ /= 1000; method = &Benchmark::ReadRandom; } else if (name == Slice("deleteseq")) { method = &Benchmark::DeleteSeq; } else if (name == Slice("deleterandom")) { method = &Benchmark::DeleteRandom; } else if (name == Slice("readwhilewriting")) { num_threads++; // Add extra thread for writing method = &Benchmark::ReadWhileWriting; } else if (name == Slice("compact")) { method = &Benchmark::Compact; } else if (name == Slice("crc32c")) { method = &Benchmark::Crc32c; } else if (name == Slice("snappycomp")) { method = &Benchmark::SnappyCompress; } else if (name == Slice("snappyuncomp")) { method = &Benchmark::SnappyUncompress; } else if (name == Slice("heapprofile")) { HeapProfile(); } else if (name == Slice("stats")) { PrintStats("leveldb.stats"); } else if (name == Slice("sstables")) { PrintStats("leveldb.sstables"); } else { if (!name.empty()) { 
// No error message for empty name fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); } } if (fresh_db) { if (FLAGS_use_existing_db) { fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n", name.ToString().c_str()); method = nullptr; } else { delete db_; db_ = nullptr; DestroyDB(FLAGS_db, Options()); Open(); } } if (method != nullptr) { RunBenchmark(num_threads, name, method); } } } private: struct ThreadArg { Benchmark* bm; SharedState* shared; ThreadState* thread; void (Benchmark::*method)(ThreadState*); }; static void ThreadBody(void* v) { ThreadArg* arg = reinterpret_cast<ThreadArg*>(v); SharedState* shared = arg->shared; ThreadState* thread = arg->thread; { MutexLock l(&shared->mu); shared->num_initialized++; if (shared->num_initialized >= shared->total) { shared->cv.SignalAll(); } while (!shared->start) { shared->cv.Wait(); } } thread->stats.Start(); (arg->bm->*(arg->method))(thread); thread->stats.Stop(); { MutexLock l(&shared->mu); shared->num_done++; if (shared->num_done >= shared->total) { shared->cv.SignalAll(); } } } void RunBenchmark(int n, Slice name, void (Benchmark::*method)(ThreadState*)) { SharedState shared(n); ThreadArg* arg = new ThreadArg[n]; for (int i = 0; i < n; i++) { arg[i].bm = this; arg[i].method = method; arg[i].shared = &shared; arg[i].thread = new ThreadState(i); arg[i].thread->shared = &shared; g_env->StartThread(ThreadBody, &arg[i]); } shared.mu.Lock(); while (shared.num_initialized < n) { shared.cv.Wait(); } shared.start = true; shared.cv.SignalAll(); while (shared.num_done < n) { shared.cv.Wait(); } shared.mu.Unlock(); for (int i = 1; i < n; i++) { arg[0].thread->stats.Merge(arg[i].thread->stats); } arg[0].thread->stats.Report(name); for (int i = 0; i < n; i++) { delete arg[i].thread; } delete[] arg; } void Crc32c(ThreadState* thread) { // Checksum about 500MB of data total const int size = 4096; const char* label = "(4K per op)"; std::string data(size, 'x'); int64_t bytes = 0; uint32_t crc = 0; while (bytes < 500 * 1048576) { crc = crc32c::Value(data.data(), size); thread->stats.FinishedSingleOp(); bytes += size; } // Print so result is not dead fprintf(stderr, "... 
crc=0x%x\r", static_cast<unsigned int>(crc)); thread->stats.AddBytes(bytes); thread->stats.AddMessage(label); } void SnappyCompress(ThreadState* thread) { RandomGenerator gen; Slice input = gen.Generate(Options().block_size); int64_t bytes = 0; int64_t produced = 0; bool ok = true; std::string compressed; while (ok && bytes < 1024 * 1048576) { // Compress 1G ok = port::Snappy_Compress(input.data(), input.size(), &compressed); produced += compressed.size(); bytes += input.size(); thread->stats.FinishedSingleOp(); } if (!ok) { thread->stats.AddMessage("(snappy failure)"); } else { char buf[100]; snprintf(buf, sizeof(buf), "(output: %.1f%%)", (produced * 100.0) / bytes); thread->stats.AddMessage(buf); thread->stats.AddBytes(bytes); } } void SnappyUncompress(ThreadState* thread) { RandomGenerator gen; Slice input = gen.Generate(Options().block_size); std::string compressed; bool ok = port::Snappy_Compress(input.data(), input.size(), &compressed); int64_t bytes = 0; char* uncompressed = new char[input.size()]; while (ok && bytes < 1024 * 1048576) { // Compress 1G ok = port::Snappy_Uncompress(compressed.data(), compressed.size(), uncompressed); bytes += input.size(); thread->stats.FinishedSingleOp(); } delete[] uncompressed; if (!ok) { thread->stats.AddMessage("(snappy failure)"); } else { thread->stats.AddBytes(bytes); } } void Open() { assert(db_ == nullptr); Options options; options.env = g_env; options.create_if_missing = !FLAGS_use_existing_db; options.block_cache = cache_; options.write_buffer_size = FLAGS_write_buffer_size; options.max_file_size = FLAGS_max_file_size; options.block_size = FLAGS_block_size; options.max_open_files = FLAGS_open_files; options.filter_policy = filter_policy_; options.reuse_logs = FLAGS_reuse_logs; Status s = DB::Open(options, FLAGS_db, &db_); if (!s.ok()) { fprintf(stderr, "open error: %s\n", s.ToString().c_str()); exit(1); } } void OpenBench(ThreadState* thread) { for (int i = 0; i < num_; i++) { delete db_; Open(); thread->stats.FinishedSingleOp(); } } void WriteSeq(ThreadState* thread) { DoWrite(thread, true); } void WriteRandom(ThreadState* thread) { DoWrite(thread, false); } void DoWrite(ThreadState* thread, bool seq) { if (num_ != FLAGS_num) { char msg[100]; snprintf(msg, sizeof(msg), "(%d ops)", num_); thread->stats.AddMessage(msg); } RandomGenerator gen; WriteBatch batch; Status s; int64_t bytes = 0; for (int i = 0; i < num_; i += entries_per_batch_) { batch.Clear(); for (int j = 0; j < entries_per_batch_; j++) { const int k = seq ? 
i + j : (thread->rand.Next() % FLAGS_num); char key[100]; snprintf(key, sizeof(key), "%016d", k); batch.Put(key, gen.Generate(value_size_)); bytes += value_size_ + strlen(key); thread->stats.FinishedSingleOp(); } s = db_->Write(write_options_, &batch); if (!s.ok()) { fprintf(stderr, "put error: %s\n", s.ToString().c_str()); exit(1); } } thread->stats.AddBytes(bytes); } void ReadSequential(ThreadState* thread) { Iterator* iter = db_->NewIterator(ReadOptions()); int i = 0; int64_t bytes = 0; for (iter->SeekToFirst(); i < reads_ && iter->Valid(); iter->Next()) { bytes += iter->key().size() + iter->value().size(); thread->stats.FinishedSingleOp(); ++i; } delete iter; thread->stats.AddBytes(bytes); } void ReadReverse(ThreadState* thread) { Iterator* iter = db_->NewIterator(ReadOptions()); int i = 0; int64_t bytes = 0; for (iter->SeekToLast(); i < reads_ && iter->Valid(); iter->Prev()) { bytes += iter->key().size() + iter->value().size(); thread->stats.FinishedSingleOp(); ++i; } delete iter; thread->stats.AddBytes(bytes); } void ReadRandom(ThreadState* thread) { ReadOptions options; std::string value; int found = 0; for (int i = 0; i < reads_; i++) { char key[100]; const int k = thread->rand.Next() % FLAGS_num; snprintf(key, sizeof(key), "%016d", k); if (db_->Get(options, key, &value).ok()) { found++; } thread->stats.FinishedSingleOp(); } char msg[100]; snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); thread->stats.AddMessage(msg); } void ReadMissing(ThreadState* thread) { ReadOptions options; std::string value; for (int i = 0; i < reads_; i++) { char key[100]; const int k = thread->rand.Next() % FLAGS_num; snprintf(key, sizeof(key), "%016d.", k); db_->Get(options, key, &value); thread->stats.FinishedSingleOp(); } } void ReadHot(ThreadState* thread) { ReadOptions options; std::string value; const int range = (FLAGS_num + 99) / 100; for (int i = 0; i < reads_; i++) { char key[100]; const int k = thread->rand.Next() % range; snprintf(key, sizeof(key), "%016d", k); db_->Get(options, key, &value); thread->stats.FinishedSingleOp(); } } void SeekRandom(ThreadState* thread) { ReadOptions options; int found = 0; for (int i = 0; i < reads_; i++) { Iterator* iter = db_->NewIterator(options); char key[100]; const int k = thread->rand.Next() % FLAGS_num; snprintf(key, sizeof(key), "%016d", k); iter->Seek(key); if (iter->Valid() && iter->key() == key) found++; delete iter; thread->stats.FinishedSingleOp(); } char msg[100]; snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); thread->stats.AddMessage(msg); } void DoDelete(ThreadState* thread, bool seq) { RandomGenerator gen; WriteBatch batch; Status s; for (int i = 0; i < num_; i += entries_per_batch_) { batch.Clear(); for (int j = 0; j < entries_per_batch_; j++) { const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num); char key[100]; snprintf(key, sizeof(key), "%016d", k); batch.Delete(key); thread->stats.FinishedSingleOp(); } s = db_->Write(write_options_, &batch); if (!s.ok()) { fprintf(stderr, "del error: %s\n", s.ToString().c_str()); exit(1); } } } void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); } void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); } void ReadWhileWriting(ThreadState* thread) { if (thread->tid > 0) { ReadRandom(thread); } else { // Special thread that keeps writing until other threads are done. 
RandomGenerator gen; while (true) { { MutexLock l(&thread->shared->mu); if (thread->shared->num_done + 1 >= thread->shared->num_initialized) { // Other threads have finished break; } } const int k = thread->rand.Next() % FLAGS_num; char key[100]; snprintf(key, sizeof(key), "%016d", k); Status s = db_->Put(write_options_, key, gen.Generate(value_size_)); if (!s.ok()) { fprintf(stderr, "put error: %s\n", s.ToString().c_str()); exit(1); } } // Do not count any of the preceding work/delay in stats. thread->stats.Start(); } } void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); } void PrintStats(const char* key) { std::string stats; if (!db_->GetProperty(key, &stats)) { stats = "(failed)"; } fprintf(stdout, "\n%s\n", stats.c_str()); } static void WriteToFile(void* arg, const char* buf, int n) { reinterpret_cast<WritableFile*>(arg)->Append(Slice(buf, n)); } void HeapProfile() { char fname[100]; snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_); WritableFile* file; Status s = g_env->NewWritableFile(fname, &file); if (!s.ok()) { fprintf(stderr, "%s\n", s.ToString().c_str()); return; } bool ok = port::GetHeapProfile(WriteToFile, file); delete file; if (!ok) { fprintf(stderr, "heap profiling not supported\n"); g_env->DeleteFile(fname); } } }; } // namespace leveldb int main(int argc, char** argv) { FLAGS_write_buffer_size = leveldb::Options().write_buffer_size; FLAGS_max_file_size = leveldb::Options().max_file_size; FLAGS_block_size = leveldb::Options().block_size; FLAGS_open_files = leveldb::Options().max_open_files; std::string default_db_path; for (int i = 1; i < argc; i++) { double d; int n; char junk; if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) { FLAGS_benchmarks = argv[i] + strlen("--benchmarks="); } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) { FLAGS_compression_ratio = d; } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_use_existing_db = n; } else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_reuse_logs = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { FLAGS_reads = n; } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) { FLAGS_threads = n; } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) { FLAGS_value_size = n; } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) { FLAGS_write_buffer_size = n; } else if (sscanf(argv[i], "--max_file_size=%d%c", &n, &junk) == 1) { FLAGS_max_file_size = n; } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) { FLAGS_block_size = n; } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) { FLAGS_cache_size = n; } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) { FLAGS_bloom_bits = n; } else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) { FLAGS_open_files = n; } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else { fprintf(stderr, "Invalid flag '%s'\n", argv[i]); exit(1); } } leveldb::g_env = leveldb::Env::Default(); // Choose a location for the test database if none given with --db=<path> if (FLAGS_db == nullptr) { leveldb::g_env->GetTestDirectory(&default_db_path); default_db_path += "/dbbench"; FLAGS_db = default_db_path.c_str(); } leveldb::Benchmark benchmark; 
benchmark.Run(); return 0; }
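// --- Illustrative sketch (not part of the original file) ---
// The minimal LevelDB calls behind a fillseq-style run: open a database,
// batch a few fixed-width keys, and commit them with one Write(). The
// path "/tmp/dbbench_sketch" and the literal "value" payload are
// assumptions for illustration only.
#include <stdio.h>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"

static void FillSeqSketch() {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/dbbench_sketch", &db);
  if (!s.ok()) {
    fprintf(stderr, "open error: %s\n", s.ToString().c_str());
    return;
  }
  leveldb::WriteBatch batch;
  for (int k = 0; k < 3; k++) {
    char key[100];
    snprintf(key, sizeof(key), "%016d", k);  // same key format as DoWrite()
    batch.Put(key, "value");
  }
  s = db->Write(leveldb::WriteOptions(), &batch);  // one batched commit
  if (!s.ok()) {
    fprintf(stderr, "put error: %s\n", s.ToString().c_str());
  }
  delete db;
}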
0
bitcoin/src/leveldb
bitcoin/src/leveldb/benchmarks/db_bench_tree_db.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <kcpolydb.h> #include <stdio.h> #include <stdlib.h> #include "util/histogram.h" #include "util/random.h" #include "util/testutil.h" // Comma-separated list of operations to run in the specified order // Actual benchmarks: // // fillseq -- write N values in sequential key order in async mode // fillrandom -- write N values in random key order in async mode // overwrite -- overwrite N values in random key order in async mode // fillseqsync -- write N/100 values in sequential key order in sync mode // fillrandsync -- write N/100 values in random key order in sync mode // fillrand100K -- write N/1000 100K values in random order in async mode // fillseq100K -- write N/1000 100K values in seq order in async mode // readseq -- read N times sequentially // readseq100K -- read N/1000 100K values in sequential order in async mode // readrand100K -- read N/1000 100K values in sequential order in async mode // readrandom -- read N times in random order static const char* FLAGS_benchmarks = "fillseq," "fillseqsync," "fillrandsync," "fillrandom," "overwrite," "readrandom," "readseq," "fillrand100K," "fillseq100K," "readseq100K," "readrand100K,"; // Number of key/values to place in database static int FLAGS_num = 1000000; // Number of read operations to do. If negative, do FLAGS_num reads. static int FLAGS_reads = -1; // Size of each value static int FLAGS_value_size = 100; // Arrange to generate values that shrink to this fraction of // their original size after compression static double FLAGS_compression_ratio = 0.5; // Print histogram of operation timings static bool FLAGS_histogram = false; // Cache size. Default 4 MB static int FLAGS_cache_size = 4194304; // Page size. Default 1 KB static int FLAGS_page_size = 1024; // If true, do not destroy the existing database. If you set this // flag and also specify a benchmark that wants a fresh database, that // benchmark will fail. static bool FLAGS_use_existing_db = false; // Compression flag. If true, compression is on. If false, compression // is off. static bool FLAGS_compression = true; // Use the db with the following name. static const char* FLAGS_db = nullptr; inline static void DBSynchronize(kyotocabinet::TreeDB* db_) { // Synchronize will flush writes to disk if (!db_->synchronize()) { fprintf(stderr, "synchronize error: %s\n", db_->error().name()); } } namespace leveldb { // Helper for quickly generating random data. namespace { class RandomGenerator { private: std::string data_; int pos_; public: RandomGenerator() { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd(301); std::string piece; while (data_.size() < 1048576) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. 
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece); data_.append(piece); } pos_ = 0; } Slice Generate(int len) { if (pos_ + len > data_.size()) { pos_ = 0; assert(len < data_.size()); } pos_ += len; return Slice(data_.data() + pos_ - len, len); } }; static Slice TrimSpace(Slice s) { int start = 0; while (start < s.size() && isspace(s[start])) { start++; } int limit = s.size(); while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); } } // namespace class Benchmark { private: kyotocabinet::TreeDB* db_; int db_num_; int num_; int reads_; double start_; double last_op_finish_; int64_t bytes_; std::string message_; Histogram hist_; RandomGenerator gen_; Random rand_; kyotocabinet::LZOCompressor<kyotocabinet::LZO::RAW> comp_; // State kept for progress messages int done_; int next_report_; // When to report next void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); fprintf(stdout, "Keys: %d bytes each\n", kKeySize); fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", FLAGS_value_size, static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); fprintf(stdout, "Entries: %d\n", num_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / 1048576.0)); fprintf(stdout, "FileSize: %.1f MB (estimated)\n", (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / 1048576.0)); PrintWarnings(); fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) fprintf( stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { fprintf(stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n", kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV); #if defined(__linux) time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); Slice val = TrimSpace(Slice(sep + 1)); if (key == "model name") { ++num_cpus; cpu_type = val.ToString(); } else if (key == "cache size") { cache_size = val.ToString(); } } fclose(cpuinfo); fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } void Start() { start_ = Env::Default()->NowMicros() * 1e-6; bytes_ = 0; message_.clear(); last_op_finish_ = start_; hist_.Clear(); done_ = 0; next_report_ = 100; } void FinishedSingleOp() { if (FLAGS_histogram) { double now = Env::Default()->NowMicros() * 1e-6; double micros = (now - last_op_finish_) * 1e6; hist_.Add(micros); if (micros > 20000) { fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); fflush(stderr); } last_op_finish_ = now; } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) next_report_ += 100; else if (next_report_ < 5000) next_report_ += 500; else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 50000) next_report_ += 5000; else if (next_report_ < 100000) next_report_ += 10000; else if (next_report_ < 500000) next_report_ += 
50000; else next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } void Stop(const Slice& name) { double finish = Env::Default()->NowMicros() * 1e-6; // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; if (bytes_ > 0) { char rate[100]; snprintf(rate, sizeof(rate), "%6.1f MB/s", (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { message_ = std::string(rate) + " " + message_; } else { message_ = rate; } } fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), message_.c_str()); if (FLAGS_histogram) { fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); } fflush(stdout); } public: enum Order { SEQUENTIAL, RANDOM }; enum DBState { FRESH, EXISTING }; Benchmark() : db_(nullptr), num_(FLAGS_num), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), bytes_(0), rand_(301) { std::vector<std::string> files; std::string test_dir; Env::Default()->GetTestDirectory(&test_dir); Env::Default()->GetChildren(test_dir.c_str(), &files); if (!FLAGS_use_existing_db) { for (int i = 0; i < files.size(); i++) { if (Slice(files[i]).starts_with("dbbench_polyDB")) { std::string file_name(test_dir); file_name += "/"; file_name += files[i]; Env::Default()->DeleteFile(file_name.c_str()); } } } } ~Benchmark() { if (!db_->close()) { fprintf(stderr, "close error: %s\n", db_->error().name()); } } void Run() { PrintHeader(); Open(false); const char* benchmarks = FLAGS_benchmarks; while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; if (sep == nullptr) { name = benchmarks; benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; } Start(); bool known = true; bool write_sync = false; if (name == Slice("fillseq")) { Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("fillrandom")) { Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("overwrite")) { Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("fillrandsync")) { write_sync = true; Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("fillseqsync")) { write_sync = true; Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1); DBSynchronize(db_); } else if (name == Slice("fillrand100K")) { Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1); DBSynchronize(db_); } else if (name == Slice("fillseq100K")) { Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1); DBSynchronize(db_); } else if (name == Slice("readseq")) { ReadSequential(); } else if (name == Slice("readrandom")) { ReadRandom(); } else if (name == Slice("readrand100K")) { int n = reads_; reads_ /= 1000; ReadRandom(); reads_ = n; } else if (name == Slice("readseq100K")) { int n = reads_; reads_ /= 1000; ReadSequential(); reads_ = n; } else { known = false; if (name != Slice()) { // No error message for empty name fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); } } if (known) { Stop(name); } } } private: void Open(bool sync) { assert(db_ == nullptr); // Initialize db_ db_ = new kyotocabinet::TreeDB(); char file_name[100]; db_num_++; std::string test_dir; Env::Default()->GetTestDirectory(&test_dir); 
snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct", test_dir.c_str(), db_num_); // Create tuning options and open the database int open_options = kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE; int tune_options = kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR; if (FLAGS_compression) { tune_options |= kyotocabinet::TreeDB::TCOMPRESS; db_->tune_compressor(&comp_); } db_->tune_options(tune_options); db_->tune_page_cache(FLAGS_cache_size); db_->tune_page(FLAGS_page_size); db_->tune_map(256LL << 20); if (sync) { open_options |= kyotocabinet::PolyDB::OAUTOSYNC; } if (!db_->open(file_name, open_options)) { fprintf(stderr, "open error: %s\n", db_->error().name()); } } void Write(bool sync, Order order, DBState state, int num_entries, int value_size, int entries_per_batch) { // Create new database if state == FRESH if (state == FRESH) { if (FLAGS_use_existing_db) { message_ = "skipping (--use_existing_db is true)"; return; } delete db_; db_ = nullptr; Open(sync); Start(); // Do not count time taken to destroy/open } if (num_entries != num_) { char msg[100]; snprintf(msg, sizeof(msg), "(%d ops)", num_entries); message_ = msg; } // Write to database for (int i = 0; i < num_entries; i++) { const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries); char key[100]; snprintf(key, sizeof(key), "%016d", k); bytes_ += value_size + strlen(key); std::string cpp_key = key; if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) { fprintf(stderr, "set error: %s\n", db_->error().name()); } FinishedSingleOp(); } } void ReadSequential() { kyotocabinet::DB::Cursor* cur = db_->cursor(); cur->jump(); std::string ckey, cvalue; while (cur->get(&ckey, &cvalue, true)) { bytes_ += ckey.size() + cvalue.size(); FinishedSingleOp(); } delete cur; } void ReadRandom() { std::string value; for (int i = 0; i < reads_; i++) { char key[100]; const int k = rand_.Next() % reads_; snprintf(key, sizeof(key), "%016d", k); db_->get(key, &value); FinishedSingleOp(); } } }; } // namespace leveldb int main(int argc, char** argv) { std::string default_db_path; for (int i = 1; i < argc; i++) { double d; int n; char junk; if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) { FLAGS_benchmarks = argv[i] + strlen("--benchmarks="); } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) { FLAGS_compression_ratio = d; } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { FLAGS_reads = n; } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) { FLAGS_value_size = n; } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) { FLAGS_cache_size = n; } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) { FLAGS_page_size = n; } else if (sscanf(argv[i], "--compression=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_compression = (n == 1) ? true : false; } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else { fprintf(stderr, "Invalid flag '%s'\n", argv[i]); exit(1); } } // Choose a location for the test database if none given with --db=<path> if (FLAGS_db == nullptr) { leveldb::Env::Default()->GetTestDirectory(&default_db_path); default_db_path += "/dbbench"; FLAGS_db = default_db_path.c_str(); } leveldb::Benchmark benchmark; benchmark.Run(); return 0; }
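// --- Illustrative sketch (not part of the original file) ---
// The basic TreeDB open/set/get/close cycle that Write() and ReadRandom()
// above build on, with the tuning calls omitted. The file name
// "/tmp/dbbench_sketch.kct" and the literal payload are assumptions for
// illustration only.
#include <kcpolydb.h>
#include <stdio.h>

static void TreeDBSketch() {
  kyotocabinet::TreeDB db;
  const int open_options =
      kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
  if (!db.open("/tmp/dbbench_sketch.kct", open_options)) {
    fprintf(stderr, "open error: %s\n", db.error().name());
    return;
  }
  if (!db.set("0000000000000001", "value")) {  // same fixed-width key format
    fprintf(stderr, "set error: %s\n", db.error().name());
  }
  std::string value;
  if (db.get("0000000000000001", &value)) {
    fprintf(stderr, "read %d bytes\n", static_cast<int>(value.size()));
  }
  if (!db.close()) {
    fprintf(stderr, "close error: %s\n", db.error().name());
  }
}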
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/log_format.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // Log format information shared by reader and writer. // See ../doc/log_format.md for more detail. #ifndef STORAGE_LEVELDB_DB_LOG_FORMAT_H_ #define STORAGE_LEVELDB_DB_LOG_FORMAT_H_ namespace leveldb { namespace log { enum RecordType { // Zero is reserved for preallocated files kZeroType = 0, kFullType = 1, // For fragments kFirstType = 2, kMiddleType = 3, kLastType = 4 }; static const int kMaxRecordType = kLastType; static const int kBlockSize = 32768; // Header is checksum (4 bytes), length (2 bytes), type (1 byte). static const int kHeaderSize = 4 + 2 + 1; } // namespace log } // namespace leveldb #endif // STORAGE_LEVELDB_DB_LOG_FORMAT_H_
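// --- Illustrative sketch (not part of the original header) ---
// How the constants above bound fragmentation: a block holds at most
// kBlockSize - kHeaderSize payload bytes per physical record, so a
// logical record written at the start of an empty block splits into the
// number of physical records computed below. (The real split also
// depends on the writer's current offset within its block.)
static int MinPhysicalRecords(int payload_bytes) {
  const int kUsable = leveldb::log::kBlockSize - leveldb::log::kHeaderSize;
  if (payload_bytes <= 0) return 1;  // an empty record still emits a header
  return (payload_bytes + kUsable - 1) / kUsable;  // ceiling division
}
// e.g. MinPhysicalRecords(100000) == 4 with the 32768-byte blocks above.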
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/log_writer.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/log_writer.h" #include <stdint.h> #include "leveldb/env.h" #include "util/coding.h" #include "util/crc32c.h" namespace leveldb { namespace log { static void InitTypeCrc(uint32_t* type_crc) { for (int i = 0; i <= kMaxRecordType; i++) { char t = static_cast<char>(i); type_crc[i] = crc32c::Value(&t, 1); } } Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) { InitTypeCrc(type_crc_); } Writer::Writer(WritableFile* dest, uint64_t dest_length) : dest_(dest), block_offset_(dest_length % kBlockSize) { InitTypeCrc(type_crc_); } Writer::~Writer() = default; Status Writer::AddRecord(const Slice& slice) { const char* ptr = slice.data(); size_t left = slice.size(); // Fragment the record if necessary and emit it. Note that if slice // is empty, we still want to iterate once to emit a single // zero-length record Status s; bool begin = true; do { const int leftover = kBlockSize - block_offset_; assert(leftover >= 0); if (leftover < kHeaderSize) { // Switch to a new block if (leftover > 0) { // Fill the trailer (literal below relies on kHeaderSize being 7) static_assert(kHeaderSize == 7, ""); dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover)); } block_offset_ = 0; } // Invariant: we never leave < kHeaderSize bytes in a block. assert(kBlockSize - block_offset_ - kHeaderSize >= 0); const size_t avail = kBlockSize - block_offset_ - kHeaderSize; const size_t fragment_length = (left < avail) ? left : avail; RecordType type; const bool end = (left == fragment_length); if (begin && end) { type = kFullType; } else if (begin) { type = kFirstType; } else if (end) { type = kLastType; } else { type = kMiddleType; } s = EmitPhysicalRecord(type, ptr, fragment_length); ptr += fragment_length; left -= fragment_length; begin = false; } while (s.ok() && left > 0); return s; } Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t length) { assert(length <= 0xffff); // Must fit in two bytes assert(block_offset_ + kHeaderSize + length <= kBlockSize); // Format the header char buf[kHeaderSize]; buf[4] = static_cast<char>(length & 0xff); buf[5] = static_cast<char>(length >> 8); buf[6] = static_cast<char>(t); // Compute the crc of the record type and the payload. uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length); crc = crc32c::Mask(crc); // Adjust for storage EncodeFixed32(buf, crc); // Write the header and the payload Status s = dest_->Append(Slice(buf, kHeaderSize)); if (s.ok()) { s = dest_->Append(Slice(ptr, length)); if (s.ok()) { s = dest_->Flush(); } } block_offset_ += kHeaderSize + length; return s; } } // namespace log } // namespace leveldb
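// --- Illustrative sketch (not part of the original file) ---
// Driving the Writer above through Env: create a WritableFile, append two
// logical records, and close. "sketch.log" is an assumed file name.
#include "db/log_writer.h"
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

static leveldb::Status WriteTwoRecordsSketch() {
  leveldb::Env* env = leveldb::Env::Default();
  leveldb::WritableFile* file = nullptr;
  leveldb::Status s = env->NewWritableFile("sketch.log", &file);
  if (!s.ok()) return s;
  {
    // Each AddRecord() call becomes one logical record, fragmented into
    // kFullType or kFirst/kMiddle/kLast physical records as needed.
    leveldb::log::Writer writer(file);
    s = writer.AddRecord(leveldb::Slice("first record"));
    if (s.ok()) s = writer.AddRecord(leveldb::Slice("second record"));
  }
  if (s.ok()) s = file->Close();
  delete file;
  return s;
}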
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/table_cache.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // Thread-safe (provides internal synchronization) #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_ #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_ #include <stdint.h> #include <string> #include "db/dbformat.h" #include "leveldb/cache.h" #include "leveldb/table.h" #include "port/port.h" namespace leveldb { class Env; class TableCache { public: TableCache(const std::string& dbname, const Options& options, int entries); ~TableCache(); // Return an iterator for the specified file number (the corresponding // file length must be exactly "file_size" bytes). If "tableptr" is // non-null, also sets "*tableptr" to point to the Table object // underlying the returned iterator, or to nullptr if no Table object // underlies the returned iterator. The returned "*tableptr" object is owned // by the cache and should not be deleted, and is valid for as long as the // returned iterator is live. Iterator* NewIterator(const ReadOptions& options, uint64_t file_number, uint64_t file_size, Table** tableptr = nullptr); // If a seek to internal key "k" in specified file finds an entry, // call (*handle_result)(arg, found_key, found_value). Status Get(const ReadOptions& options, uint64_t file_number, uint64_t file_size, const Slice& k, void* arg, void (*handle_result)(void*, const Slice&, const Slice&)); // Evict any entry for the specified file number void Evict(uint64_t file_number); private: Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**); Env* const env_; const std::string dbname_; const Options& options_; Cache* cache_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_DB_TABLE_CACHE_H_
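// --- Illustrative sketch (not part of the original header) ---
// The typical call shape for NewIterator() above. The file number and
// size are placeholders -- in real use they come from a FileMetaData
// entry in the current Version.
#include "db/table_cache.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"

static void ScanOneTableSketch(leveldb::TableCache* cache,
                               uint64_t file_number, uint64_t file_size) {
  leveldb::ReadOptions options;
  leveldb::Iterator* iter =
      cache->NewIterator(options, file_number, file_size);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    // iter->key() holds an internal key (user key + sequence/type tag).
  }
  delete iter;  // any Table* obtained via tableptr is only valid before this
}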
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/db_impl.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/db_impl.h" #include <stdint.h> #include <stdio.h> #include <algorithm> #include <atomic> #include <set> #include <string> #include <vector> #include "db/builder.h" #include "db/db_iter.h" #include "db/dbformat.h" #include "db/filename.h" #include "db/log_reader.h" #include "db/log_writer.h" #include "db/memtable.h" #include "db/table_cache.h" #include "db/version_set.h" #include "db/write_batch_internal.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "leveldb/status.h" #include "leveldb/table.h" #include "leveldb/table_builder.h" #include "port/port.h" #include "table/block.h" #include "table/merger.h" #include "table/two_level_iterator.h" #include "util/coding.h" #include "util/logging.h" #include "util/mutexlock.h" namespace leveldb { const int kNumNonTableCacheFiles = 10; // Information kept for every waiting writer struct DBImpl::Writer { explicit Writer(port::Mutex* mu) : batch(nullptr), sync(false), done(false), cv(mu) {} Status status; WriteBatch* batch; bool sync; bool done; port::CondVar cv; }; struct DBImpl::CompactionState { // Files produced by compaction struct Output { uint64_t number; uint64_t file_size; InternalKey smallest, largest; }; Output* current_output() { return &outputs[outputs.size() - 1]; } explicit CompactionState(Compaction* c) : compaction(c), smallest_snapshot(0), outfile(nullptr), builder(nullptr), total_bytes(0) {} Compaction* const compaction; // Sequence numbers < smallest_snapshot are not significant since we // will never have to service a snapshot below smallest_snapshot. // Therefore if we have seen a sequence number S <= smallest_snapshot, // we can drop all entries for the same key with sequence numbers < S. SequenceNumber smallest_snapshot; std::vector<Output> outputs; // State kept for output being generated WritableFile* outfile; TableBuilder* builder; uint64_t total_bytes; }; // Fix user-supplied options to be reasonable template <class T, class V> static void ClipToRange(T* ptr, V minvalue, V maxvalue) { if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue; if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue; } Options SanitizeOptions(const std::string& dbname, const InternalKeyComparator* icmp, const InternalFilterPolicy* ipolicy, const Options& src) { Options result = src; result.comparator = icmp; result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr; ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000); ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30); ClipToRange(&result.max_file_size, 1 << 20, 1 << 30); ClipToRange(&result.block_size, 1 << 10, 4 << 20); if (result.info_log == nullptr) { // Open a log file in the same directory as the db src.env->CreateDir(dbname); // In case it does not exist src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname)); Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log); if (!s.ok()) { // No place suitable for logging result.info_log = nullptr; } } if (result.block_cache == nullptr) { result.block_cache = NewLRUCache(8 << 20); } return result; } static int TableCacheSize(const Options& sanitized_options) { // Reserve ten files or so for other uses and give the rest to TableCache. 
return sanitized_options.max_open_files - kNumNonTableCacheFiles; } DBImpl::DBImpl(const Options& raw_options, const std::string& dbname) : env_(raw_options.env), internal_comparator_(raw_options.comparator), internal_filter_policy_(raw_options.filter_policy), options_(SanitizeOptions(dbname, &internal_comparator_, &internal_filter_policy_, raw_options)), owns_info_log_(options_.info_log != raw_options.info_log), owns_cache_(options_.block_cache != raw_options.block_cache), dbname_(dbname), table_cache_(new TableCache(dbname_, options_, TableCacheSize(options_))), db_lock_(nullptr), shutting_down_(false), background_work_finished_signal_(&mutex_), mem_(nullptr), imm_(nullptr), has_imm_(false), logfile_(nullptr), logfile_number_(0), log_(nullptr), seed_(0), tmp_batch_(new WriteBatch), background_compaction_scheduled_(false), manual_compaction_(nullptr), versions_(new VersionSet(dbname_, &options_, table_cache_, &internal_comparator_)) {} DBImpl::~DBImpl() { // Wait for background work to finish. mutex_.Lock(); shutting_down_.store(true, std::memory_order_release); while (background_compaction_scheduled_) { background_work_finished_signal_.Wait(); } mutex_.Unlock(); if (db_lock_ != nullptr) { env_->UnlockFile(db_lock_); } delete versions_; if (mem_ != nullptr) mem_->Unref(); if (imm_ != nullptr) imm_->Unref(); delete tmp_batch_; delete log_; delete logfile_; delete table_cache_; if (owns_info_log_) { delete options_.info_log; } if (owns_cache_) { delete options_.block_cache; } } Status DBImpl::NewDB() { VersionEdit new_db; new_db.SetComparatorName(user_comparator()->Name()); new_db.SetLogNumber(0); new_db.SetNextFile(2); new_db.SetLastSequence(0); const std::string manifest = DescriptorFileName(dbname_, 1); WritableFile* file; Status s = env_->NewWritableFile(manifest, &file); if (!s.ok()) { return s; } { log::Writer log(file); std::string record; new_db.EncodeTo(&record); s = log.AddRecord(record); if (s.ok()) { s = file->Close(); } } delete file; if (s.ok()) { // Make "CURRENT" file that points to the new manifest file. s = SetCurrentFile(env_, dbname_, 1); } else { env_->DeleteFile(manifest); } return s; } void DBImpl::MaybeIgnoreError(Status* s) const { if (s->ok() || options_.paranoid_checks) { // No change needed } else { Log(options_.info_log, "Ignoring error %s", s->ToString().c_str()); *s = Status::OK(); } } void DBImpl::DeleteObsoleteFiles() { mutex_.AssertHeld(); if (!bg_error_.ok()) { // After a background error, we don't know whether a new version may // or may not have been committed, so we cannot safely garbage collect. 
return; } // Make a set of all of the live files std::set<uint64_t> live = pending_outputs_; versions_->AddLiveFiles(&live); std::vector<std::string> filenames; env_->GetChildren(dbname_, &filenames); // Ignoring errors on purpose uint64_t number; FileType type; std::vector<std::string> files_to_delete; for (std::string& filename : filenames) { if (ParseFileName(filename, &number, &type)) { bool keep = true; switch (type) { case kLogFile: keep = ((number >= versions_->LogNumber()) || (number == versions_->PrevLogNumber())); break; case kDescriptorFile: // Keep my manifest file, and any newer incarnations' // (in case there is a race that allows other incarnations) keep = (number >= versions_->ManifestFileNumber()); break; case kTableFile: keep = (live.find(number) != live.end()); break; case kTempFile: // Any temp files that are currently being written to must // be recorded in pending_outputs_, which is inserted into "live" keep = (live.find(number) != live.end()); break; case kCurrentFile: case kDBLockFile: case kInfoLogFile: keep = true; break; } if (!keep) { files_to_delete.push_back(std::move(filename)); if (type == kTableFile) { table_cache_->Evict(number); } Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type), static_cast<unsigned long long>(number)); } } } // While deleting all files unblock other threads. All files being deleted // have unique names which will not collide with newly created files and // are therefore safe to delete while allowing other threads to proceed. mutex_.Unlock(); for (const std::string& filename : files_to_delete) { env_->DeleteFile(dbname_ + "/" + filename); } mutex_.Lock(); } Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) { mutex_.AssertHeld(); // Ignore error from CreateDir since the creation of the DB is // committed only when the descriptor is created, and this directory // may already exist from a previous failed creation attempt. env_->CreateDir(dbname_); assert(db_lock_ == nullptr); Status s = env_->LockFile(LockFileName(dbname_), &db_lock_); if (!s.ok()) { return s; } if (!env_->FileExists(CurrentFileName(dbname_))) { if (options_.create_if_missing) { s = NewDB(); if (!s.ok()) { return s; } } else { return Status::InvalidArgument( dbname_, "does not exist (create_if_missing is false)"); } } else { if (options_.error_if_exists) { return Status::InvalidArgument(dbname_, "exists (error_if_exists is true)"); } } s = versions_->Recover(save_manifest); if (!s.ok()) { return s; } SequenceNumber max_sequence(0); // Recover from all newer log files than the ones named in the // descriptor (new log files may have been added by the previous // incarnation without registering them in the descriptor). // // Note that PrevLogNumber() is no longer used, but we pay // attention to it in case we are recovering a database // produced by an older version of leveldb. 
const uint64_t min_log = versions_->LogNumber(); const uint64_t prev_log = versions_->PrevLogNumber(); std::vector<std::string> filenames; s = env_->GetChildren(dbname_, &filenames); if (!s.ok()) { return s; } std::set<uint64_t> expected; versions_->AddLiveFiles(&expected); uint64_t number; FileType type; std::vector<uint64_t> logs; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type)) { expected.erase(number); if (type == kLogFile && ((number >= min_log) || (number == prev_log))) logs.push_back(number); } } if (!expected.empty()) { char buf[50]; snprintf(buf, sizeof(buf), "%d missing files; e.g.", static_cast<int>(expected.size())); return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin()))); } // Recover in the order in which the logs were generated std::sort(logs.begin(), logs.end()); for (size_t i = 0; i < logs.size(); i++) { s = RecoverLogFile(logs[i], (i == logs.size() - 1), save_manifest, edit, &max_sequence); if (!s.ok()) { return s; } // The previous incarnation may not have written any MANIFEST // records after allocating this log number. So we manually // update the file number allocation counter in VersionSet. versions_->MarkFileNumberUsed(logs[i]); } if (versions_->LastSequence() < max_sequence) { versions_->SetLastSequence(max_sequence); } return Status::OK(); } Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest, VersionEdit* edit, SequenceNumber* max_sequence) { struct LogReporter : public log::Reader::Reporter { Env* env; Logger* info_log; const char* fname; Status* status; // null if options_.paranoid_checks==false void Corruption(size_t bytes, const Status& s) override { Log(info_log, "%s%s: dropping %d bytes; %s", (this->status == nullptr ? "(ignoring error) " : ""), fname, static_cast<int>(bytes), s.ToString().c_str()); if (this->status != nullptr && this->status->ok()) *this->status = s; } }; mutex_.AssertHeld(); // Open the log file std::string fname = LogFileName(dbname_, log_number); SequentialFile* file; Status status = env_->NewSequentialFile(fname, &file); if (!status.ok()) { MaybeIgnoreError(&status); return status; } // Create the log reader. LogReporter reporter; reporter.env = env_; reporter.info_log = options_.info_log; reporter.fname = fname.c_str(); reporter.status = (options_.paranoid_checks ? &status : nullptr); // We intentionally make log::Reader do checksumming even if // paranoid_checks==false so that corruptions cause entire commits // to be skipped instead of propagating bad information (like overly // large sequence numbers). 
log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/); Log(options_.info_log, "Recovering log #%llu", (unsigned long long)log_number); // Read all the records and add to a memtable std::string scratch; Slice record; WriteBatch batch; int compactions = 0; MemTable* mem = nullptr; while (reader.ReadRecord(&record, &scratch) && status.ok()) { if (record.size() < 12) { reporter.Corruption(record.size(), Status::Corruption("log record too small", fname)); continue; } WriteBatchInternal::SetContents(&batch, record); if (mem == nullptr) { mem = new MemTable(internal_comparator_); mem->Ref(); } status = WriteBatchInternal::InsertInto(&batch, mem); MaybeIgnoreError(&status); if (!status.ok()) { break; } const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) + WriteBatchInternal::Count(&batch) - 1; if (last_seq > *max_sequence) { *max_sequence = last_seq; } if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) { compactions++; *save_manifest = true; status = WriteLevel0Table(mem, edit, nullptr); mem->Unref(); mem = nullptr; if (!status.ok()) { // Reflect errors immediately so that conditions like full // file-systems cause the DB::Open() to fail. break; } } } delete file; // See if we should keep reusing the last log file. if (status.ok() && options_.reuse_logs && last_log && compactions == 0) { assert(logfile_ == nullptr); assert(log_ == nullptr); assert(mem_ == nullptr); uint64_t lfile_size; if (env_->GetFileSize(fname, &lfile_size).ok() && env_->NewAppendableFile(fname, &logfile_).ok()) { Log(options_.info_log, "Reusing old log %s \n", fname.c_str()); log_ = new log::Writer(logfile_, lfile_size); logfile_number_ = log_number; if (mem != nullptr) { mem_ = mem; mem = nullptr; } else { // mem can be nullptr if lognum exists but was empty. mem_ = new MemTable(internal_comparator_); mem_->Ref(); } } } if (mem != nullptr) { // mem did not get reused; compact it. if (status.ok()) { *save_manifest = true; status = WriteLevel0Table(mem, edit, nullptr); } mem->Unref(); } return status; } Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base) { mutex_.AssertHeld(); const uint64_t start_micros = env_->NowMicros(); FileMetaData meta; meta.number = versions_->NewFileNumber(); pending_outputs_.insert(meta.number); Iterator* iter = mem->NewIterator(); Log(options_.info_log, "Level-0 table #%llu: started", (unsigned long long)meta.number); Status s; { mutex_.Unlock(); s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta); mutex_.Lock(); } Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s", (unsigned long long)meta.number, (unsigned long long)meta.file_size, s.ToString().c_str()); delete iter; pending_outputs_.erase(meta.number); // Note that if file_size is zero, the file has been deleted and // should not be added to the manifest. 
int level = 0; if (s.ok() && meta.file_size > 0) { const Slice min_user_key = meta.smallest.user_key(); const Slice max_user_key = meta.largest.user_key(); if (base != nullptr) { level = base->PickLevelForMemTableOutput(min_user_key, max_user_key); } edit->AddFile(level, meta.number, meta.file_size, meta.smallest, meta.largest); } CompactionStats stats; stats.micros = env_->NowMicros() - start_micros; stats.bytes_written = meta.file_size; stats_[level].Add(stats); return s; } void DBImpl::CompactMemTable() { mutex_.AssertHeld(); assert(imm_ != nullptr); // Save the contents of the memtable as a new Table VersionEdit edit; Version* base = versions_->current(); base->Ref(); Status s = WriteLevel0Table(imm_, &edit, base); base->Unref(); if (s.ok() && shutting_down_.load(std::memory_order_acquire)) { s = Status::IOError("Deleting DB during memtable compaction"); } // Replace immutable memtable with the generated Table if (s.ok()) { edit.SetPrevLogNumber(0); edit.SetLogNumber(logfile_number_); // Earlier logs no longer needed s = versions_->LogAndApply(&edit, &mutex_); } if (s.ok()) { // Commit to the new state imm_->Unref(); imm_ = nullptr; has_imm_.store(false, std::memory_order_release); DeleteObsoleteFiles(); } else { RecordBackgroundError(s); } } void DBImpl::CompactRange(const Slice* begin, const Slice* end) { int max_level_with_files = 1; { MutexLock l(&mutex_); Version* base = versions_->current(); for (int level = 1; level < config::kNumLevels; level++) { if (base->OverlapInLevel(level, begin, end)) { max_level_with_files = level; } } } TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap for (int level = 0; level < max_level_with_files; level++) { TEST_CompactRange(level, begin, end); } } void DBImpl::TEST_CompactRange(int level, const Slice* begin, const Slice* end) { assert(level >= 0); assert(level + 1 < config::kNumLevels); InternalKey begin_storage, end_storage; ManualCompaction manual; manual.level = level; manual.done = false; if (begin == nullptr) { manual.begin = nullptr; } else { begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek); manual.begin = &begin_storage; } if (end == nullptr) { manual.end = nullptr; } else { end_storage = InternalKey(*end, 0, static_cast<ValueType>(0)); manual.end = &end_storage; } MutexLock l(&mutex_); while (!manual.done && !shutting_down_.load(std::memory_order_acquire) && bg_error_.ok()) { if (manual_compaction_ == nullptr) { // Idle manual_compaction_ = &manual; MaybeScheduleCompaction(); } else { // Running either my compaction or another compaction. background_work_finished_signal_.Wait(); } } if (manual_compaction_ == &manual) { // Cancel my manual compaction since we aborted early for some reason. 
manual_compaction_ = nullptr; } } Status DBImpl::TEST_CompactMemTable() { // nullptr batch means just wait for earlier writes to be done Status s = Write(WriteOptions(), nullptr); if (s.ok()) { // Wait until the compaction completes MutexLock l(&mutex_); while (imm_ != nullptr && bg_error_.ok()) { background_work_finished_signal_.Wait(); } if (imm_ != nullptr) { s = bg_error_; } } return s; } void DBImpl::RecordBackgroundError(const Status& s) { mutex_.AssertHeld(); if (bg_error_.ok()) { bg_error_ = s; background_work_finished_signal_.SignalAll(); } } void DBImpl::MaybeScheduleCompaction() { mutex_.AssertHeld(); if (background_compaction_scheduled_) { // Already scheduled } else if (shutting_down_.load(std::memory_order_acquire)) { // DB is being deleted; no more background compactions } else if (!bg_error_.ok()) { // Already got an error; no more changes } else if (imm_ == nullptr && manual_compaction_ == nullptr && !versions_->NeedsCompaction()) { // No work to be done } else { background_compaction_scheduled_ = true; env_->Schedule(&DBImpl::BGWork, this); } } void DBImpl::BGWork(void* db) { reinterpret_cast<DBImpl*>(db)->BackgroundCall(); } void DBImpl::BackgroundCall() { MutexLock l(&mutex_); assert(background_compaction_scheduled_); if (shutting_down_.load(std::memory_order_acquire)) { // No more background work when shutting down. } else if (!bg_error_.ok()) { // No more background work after a background error. } else { BackgroundCompaction(); } background_compaction_scheduled_ = false; // Previous compaction may have produced too many files in a level, // so reschedule another compaction if needed. MaybeScheduleCompaction(); background_work_finished_signal_.SignalAll(); } void DBImpl::BackgroundCompaction() { mutex_.AssertHeld(); if (imm_ != nullptr) { CompactMemTable(); return; } Compaction* c; bool is_manual = (manual_compaction_ != nullptr); InternalKey manual_end; if (is_manual) { ManualCompaction* m = manual_compaction_; c = versions_->CompactRange(m->level, m->begin, m->end); m->done = (c == nullptr); if (c != nullptr) { manual_end = c->input(0, c->num_input_files(0) - 1)->largest; } Log(options_.info_log, "Manual compaction at level-%d from %s .. %s; will stop at %s\n", m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"), (m->end ? m->end->DebugString().c_str() : "(end)"), (m->done ? 
"(end)" : manual_end.DebugString().c_str())); } else { c = versions_->PickCompaction(); } Status status; if (c == nullptr) { // Nothing to do } else if (!is_manual && c->IsTrivialMove()) { // Move file to next level assert(c->num_input_files(0) == 1); FileMetaData* f = c->input(0, 0); c->edit()->DeleteFile(c->level(), f->number); c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest, f->largest); status = versions_->LogAndApply(c->edit(), &mutex_); if (!status.ok()) { RecordBackgroundError(status); } VersionSet::LevelSummaryStorage tmp; Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n", static_cast<unsigned long long>(f->number), c->level() + 1, static_cast<unsigned long long>(f->file_size), status.ToString().c_str(), versions_->LevelSummary(&tmp)); } else { CompactionState* compact = new CompactionState(c); status = DoCompactionWork(compact); if (!status.ok()) { RecordBackgroundError(status); } CleanupCompaction(compact); c->ReleaseInputs(); DeleteObsoleteFiles(); } delete c; if (status.ok()) { // Done } else if (shutting_down_.load(std::memory_order_acquire)) { // Ignore compaction errors found during shutting down } else { Log(options_.info_log, "Compaction error: %s", status.ToString().c_str()); } if (is_manual) { ManualCompaction* m = manual_compaction_; if (!status.ok()) { m->done = true; } if (!m->done) { // We only compacted part of the requested range. Update *m // to the range that is left to be compacted. m->tmp_storage = manual_end; m->begin = &m->tmp_storage; } manual_compaction_ = nullptr; } } void DBImpl::CleanupCompaction(CompactionState* compact) { mutex_.AssertHeld(); if (compact->builder != nullptr) { // May happen if we get a shutdown call in the middle of compaction compact->builder->Abandon(); delete compact->builder; } else { assert(compact->outfile == nullptr); } delete compact->outfile; for (size_t i = 0; i < compact->outputs.size(); i++) { const CompactionState::Output& out = compact->outputs[i]; pending_outputs_.erase(out.number); } delete compact; } Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) { assert(compact != nullptr); assert(compact->builder == nullptr); uint64_t file_number; { mutex_.Lock(); file_number = versions_->NewFileNumber(); pending_outputs_.insert(file_number); CompactionState::Output out; out.number = file_number; out.smallest.Clear(); out.largest.Clear(); compact->outputs.push_back(out); mutex_.Unlock(); } // Make the output file std::string fname = TableFileName(dbname_, file_number); Status s = env_->NewWritableFile(fname, &compact->outfile); if (s.ok()) { compact->builder = new TableBuilder(options_, compact->outfile); } return s; } Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, Iterator* input) { assert(compact != nullptr); assert(compact->outfile != nullptr); assert(compact->builder != nullptr); const uint64_t output_number = compact->current_output()->number; assert(output_number != 0); // Check for iterator errors Status s = input->status(); const uint64_t current_entries = compact->builder->NumEntries(); if (s.ok()) { s = compact->builder->Finish(); } else { compact->builder->Abandon(); } const uint64_t current_bytes = compact->builder->FileSize(); compact->current_output()->file_size = current_bytes; compact->total_bytes += current_bytes; delete compact->builder; compact->builder = nullptr; // Finish and check for file errors if (s.ok()) { s = compact->outfile->Sync(); } if (s.ok()) { s = compact->outfile->Close(); } delete compact->outfile; compact->outfile 
= nullptr; if (s.ok() && current_entries > 0) { // Verify that the table is usable Iterator* iter = table_cache_->NewIterator(ReadOptions(), output_number, current_bytes); s = iter->status(); delete iter; if (s.ok()) { Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes", (unsigned long long)output_number, compact->compaction->level(), (unsigned long long)current_entries, (unsigned long long)current_bytes); } } return s; } Status DBImpl::InstallCompactionResults(CompactionState* compact) { mutex_.AssertHeld(); Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes", compact->compaction->num_input_files(0), compact->compaction->level(), compact->compaction->num_input_files(1), compact->compaction->level() + 1, static_cast<long long>(compact->total_bytes)); // Add compaction outputs compact->compaction->AddInputDeletions(compact->compaction->edit()); const int level = compact->compaction->level(); for (size_t i = 0; i < compact->outputs.size(); i++) { const CompactionState::Output& out = compact->outputs[i]; compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size, out.smallest, out.largest); } return versions_->LogAndApply(compact->compaction->edit(), &mutex_); } Status DBImpl::DoCompactionWork(CompactionState* compact) { const uint64_t start_micros = env_->NowMicros(); int64_t imm_micros = 0; // Micros spent doing imm_ compactions Log(options_.info_log, "Compacting %d@%d + %d@%d files", compact->compaction->num_input_files(0), compact->compaction->level(), compact->compaction->num_input_files(1), compact->compaction->level() + 1); assert(versions_->NumLevelFiles(compact->compaction->level()) > 0); assert(compact->builder == nullptr); assert(compact->outfile == nullptr); if (snapshots_.empty()) { compact->smallest_snapshot = versions_->LastSequence(); } else { compact->smallest_snapshot = snapshots_.oldest()->sequence_number(); } Iterator* input = versions_->MakeInputIterator(compact->compaction); // Release mutex while we're actually doing the compaction work mutex_.Unlock(); input->SeekToFirst(); Status status; ParsedInternalKey ikey; std::string current_user_key; bool has_current_user_key = false; SequenceNumber last_sequence_for_key = kMaxSequenceNumber; while (input->Valid() && !shutting_down_.load(std::memory_order_acquire)) { // Prioritize immutable compaction work if (has_imm_.load(std::memory_order_relaxed)) { const uint64_t imm_start = env_->NowMicros(); mutex_.Lock(); if (imm_ != nullptr) { CompactMemTable(); // Wake up MakeRoomForWrite() if necessary. background_work_finished_signal_.SignalAll(); } mutex_.Unlock(); imm_micros += (env_->NowMicros() - imm_start); } Slice key = input->key(); if (compact->compaction->ShouldStopBefore(key) && compact->builder != nullptr) { status = FinishCompactionOutputFile(compact, input); if (!status.ok()) { break; } } // Handle key/value, add to state, etc. 
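// Added note (illustrative, not from the original source): a hypothetical
// trace of the dropping rules below. Suppose user key "k" occurs twice in
// the input, as (k, seq=7, value) and (k, seq=3, value), and
// smallest_snapshot == 10. The seq=7 entry is the first occurrence of "k",
// so it is kept and last_sequence_for_key becomes 7; when the seq=3 entry
// is visited, last_sequence_for_key (7) <= smallest_snapshot (10), so the
// shadowed older entry is dropped by rule (A) below.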
bool drop = false; if (!ParseInternalKey(key, &ikey)) { // Do not hide error keys current_user_key.clear(); has_current_user_key = false; last_sequence_for_key = kMaxSequenceNumber; } else { if (!has_current_user_key || user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) != 0) { // First occurrence of this user key current_user_key.assign(ikey.user_key.data(), ikey.user_key.size()); has_current_user_key = true; last_sequence_for_key = kMaxSequenceNumber; } if (last_sequence_for_key <= compact->smallest_snapshot) { // Hidden by a newer entry for the same user key drop = true; // (A) } else if (ikey.type == kTypeDeletion && ikey.sequence <= compact->smallest_snapshot && compact->compaction->IsBaseLevelForKey(ikey.user_key)) { // For this user key: // (1) there is no data in higher levels // (2) data in lower levels will have larger sequence numbers // (3) data in layers that are being compacted here and have // smaller sequence numbers will be dropped in the next // few iterations of this loop (by rule (A) above). // Therefore this deletion marker is obsolete and can be dropped. drop = true; } last_sequence_for_key = ikey.sequence; } #if 0 Log(options_.info_log, " Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, " "%d smallest_snapshot: %d", ikey.user_key.ToString().c_str(), (int)ikey.sequence, ikey.type, kTypeValue, drop, compact->compaction->IsBaseLevelForKey(ikey.user_key), (int)last_sequence_for_key, (int)compact->smallest_snapshot); #endif if (!drop) { // Open output file if necessary if (compact->builder == nullptr) { status = OpenCompactionOutputFile(compact); if (!status.ok()) { break; } } if (compact->builder->NumEntries() == 0) { compact->current_output()->smallest.DecodeFrom(key); } compact->current_output()->largest.DecodeFrom(key); compact->builder->Add(key, input->value()); // Close output file if it is big enough if (compact->builder->FileSize() >= compact->compaction->MaxOutputFileSize()) { status = FinishCompactionOutputFile(compact, input); if (!status.ok()) { break; } } } input->Next(); } if (status.ok() && shutting_down_.load(std::memory_order_acquire)) { status = Status::IOError("Deleting DB during compaction"); } if (status.ok() && compact->builder != nullptr) { status = FinishCompactionOutputFile(compact, input); } if (status.ok()) { status = input->status(); } delete input; input = nullptr; CompactionStats stats; stats.micros = env_->NowMicros() - start_micros - imm_micros; for (int which = 0; which < 2; which++) { for (int i = 0; i < compact->compaction->num_input_files(which); i++) { stats.bytes_read += compact->compaction->input(which, i)->file_size; } } for (size_t i = 0; i < compact->outputs.size(); i++) { stats.bytes_written += compact->outputs[i].file_size; } mutex_.Lock(); stats_[compact->compaction->level() + 1].Add(stats); if (status.ok()) { status = InstallCompactionResults(compact); } if (!status.ok()) { RecordBackgroundError(status); } VersionSet::LevelSummaryStorage tmp; Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp)); return status; } namespace { struct IterState { port::Mutex* const mu; Version* const version GUARDED_BY(mu); MemTable* const mem GUARDED_BY(mu); MemTable* const imm GUARDED_BY(mu); IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version) : mu(mutex), version(version), mem(mem), imm(imm) {} }; static void CleanupIteratorState(void* arg1, void* arg2) { IterState* state = reinterpret_cast<IterState*>(arg1); state->mu->Lock(); state->mem->Unref(); if (state->imm != nullptr) 
state->imm->Unref(); state->version->Unref(); state->mu->Unlock(); delete state; } } // anonymous namespace Iterator* DBImpl::NewInternalIterator(const ReadOptions& options, SequenceNumber* latest_snapshot, uint32_t* seed) { mutex_.Lock(); *latest_snapshot = versions_->LastSequence(); // Collect together all needed child iterators std::vector<Iterator*> list; list.push_back(mem_->NewIterator()); mem_->Ref(); if (imm_ != nullptr) { list.push_back(imm_->NewIterator()); imm_->Ref(); } versions_->current()->AddIterators(options, &list); Iterator* internal_iter = NewMergingIterator(&internal_comparator_, &list[0], list.size()); versions_->current()->Ref(); IterState* cleanup = new IterState(&mutex_, mem_, imm_, versions_->current()); internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr); *seed = ++seed_; mutex_.Unlock(); return internal_iter; } Iterator* DBImpl::TEST_NewInternalIterator() { SequenceNumber ignored; uint32_t ignored_seed; return NewInternalIterator(ReadOptions(), &ignored, &ignored_seed); } int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() { MutexLock l(&mutex_); return versions_->MaxNextLevelOverlappingBytes(); } Status DBImpl::Get(const ReadOptions& options, const Slice& key, std::string* value) { Status s; MutexLock l(&mutex_); SequenceNumber snapshot; if (options.snapshot != nullptr) { snapshot = static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number(); } else { snapshot = versions_->LastSequence(); } MemTable* mem = mem_; MemTable* imm = imm_; Version* current = versions_->current(); mem->Ref(); if (imm != nullptr) imm->Ref(); current->Ref(); bool have_stat_update = false; Version::GetStats stats; // Unlock while reading from files and memtables { mutex_.Unlock(); // First look in the memtable, then in the immutable memtable (if any). LookupKey lkey(key, snapshot); if (mem->Get(lkey, value, &s)) { // Done } else if (imm != nullptr && imm->Get(lkey, value, &s)) { // Done } else { s = current->Get(options, lkey, value, &stats); have_stat_update = true; } mutex_.Lock(); } if (have_stat_update && current->UpdateStats(stats)) { MaybeScheduleCompaction(); } mem->Unref(); if (imm != nullptr) imm->Unref(); current->Unref(); return s; } Iterator* DBImpl::NewIterator(const ReadOptions& options) { SequenceNumber latest_snapshot; uint32_t seed; Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed); return NewDBIterator(this, user_comparator(), iter, (options.snapshot != nullptr ? static_cast<const SnapshotImpl*>(options.snapshot) ->sequence_number() : latest_snapshot), seed); } void DBImpl::RecordReadSample(Slice key) { MutexLock l(&mutex_); if (versions_->current()->RecordReadSample(key)) { MaybeScheduleCompaction(); } } const Snapshot* DBImpl::GetSnapshot() { MutexLock l(&mutex_); return snapshots_.New(versions_->LastSequence()); } void DBImpl::ReleaseSnapshot(const Snapshot* snapshot) { MutexLock l(&mutex_); snapshots_.Delete(static_cast<const SnapshotImpl*>(snapshot)); } // Convenience methods Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) { return DB::Put(o, key, val); } Status DBImpl::Delete(const WriteOptions& options, const Slice& key) { return DB::Delete(options, key); } Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) { Writer w(&mutex_); w.batch = updates; w.sync = options.sync; w.done = false; MutexLock l(&mutex_); writers_.push_back(&w); while (!w.done && &w != writers_.front()) { w.cv.Wait(); } if (w.done) { return w.status; } // May temporarily unlock and wait. 
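// Added note (illustrative, not from the original source): at this point &w
// is at the front of writers_, so this thread acts as the group leader:
// MakeRoomForWrite() may unlock the mutex while waiting on compactions, and
// BuildBatchGroup() below may fold the batches of queued followers into a
// single log record; the loop that drains writers_ then marks the folded
// writers done and signals them.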
Status status = MakeRoomForWrite(updates == nullptr); uint64_t last_sequence = versions_->LastSequence(); Writer* last_writer = &w; if (status.ok() && updates != nullptr) { // nullptr batch is for compactions WriteBatch* write_batch = BuildBatchGroup(&last_writer); WriteBatchInternal::SetSequence(write_batch, last_sequence + 1); last_sequence += WriteBatchInternal::Count(write_batch); // Add to log and apply to memtable. We can release the lock // during this phase since &w is currently responsible for logging // and protects against concurrent loggers and concurrent writes // into mem_. { mutex_.Unlock(); status = log_->AddRecord(WriteBatchInternal::Contents(write_batch)); bool sync_error = false; if (status.ok() && options.sync) { status = logfile_->Sync(); if (!status.ok()) { sync_error = true; } } if (status.ok()) { status = WriteBatchInternal::InsertInto(write_batch, mem_); } mutex_.Lock(); if (sync_error) { // The state of the log file is indeterminate: the log record we // just added may or may not show up when the DB is re-opened. // So we force the DB into a mode where all future writes fail. RecordBackgroundError(status); } } if (write_batch == tmp_batch_) tmp_batch_->Clear(); versions_->SetLastSequence(last_sequence); } while (true) { Writer* ready = writers_.front(); writers_.pop_front(); if (ready != &w) { ready->status = status; ready->done = true; ready->cv.Signal(); } if (ready == last_writer) break; } // Notify new head of write queue if (!writers_.empty()) { writers_.front()->cv.Signal(); } return status; } // REQUIRES: Writer list must be non-empty // REQUIRES: First writer must have a non-null batch WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) { mutex_.AssertHeld(); assert(!writers_.empty()); Writer* first = writers_.front(); WriteBatch* result = first->batch; assert(result != nullptr); size_t size = WriteBatchInternal::ByteSize(first->batch); // Allow the group to grow up to a maximum size, but if the // original write is small, limit the growth so we do not slow // down the small write too much. size_t max_size = 1 << 20; if (size <= (128 << 10)) { max_size = size + (128 << 10); } *last_writer = first; std::deque<Writer*>::iterator iter = writers_.begin(); ++iter; // Advance past "first" for (; iter != writers_.end(); ++iter) { Writer* w = *iter; if (w->sync && !first->sync) { // Do not include a sync write into a batch handled by a non-sync write. break; } if (w->batch != nullptr) { size += WriteBatchInternal::ByteSize(w->batch); if (size > max_size) { // Do not make batch too big break; } // Append to *result if (result == first->batch) { // Switch to temporary batch instead of disturbing caller's batch result = tmp_batch_; assert(WriteBatchInternal::Count(result) == 0); WriteBatchInternal::Append(result, first->batch); } WriteBatchInternal::Append(result, w->batch); } *last_writer = w; } return result; } // REQUIRES: mutex_ is held // REQUIRES: this thread is currently at the front of the writer queue Status DBImpl::MakeRoomForWrite(bool force) { mutex_.AssertHeld(); assert(!writers_.empty()); bool allow_delay = !force; Status s; while (true) { if (!bg_error_.ok()) { // Yield previous error s = bg_error_; break; } else if (allow_delay && versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) { // We are getting close to hitting a hard limit on the number of // L0 files. Rather than delaying a single write by several // seconds when we hit the hard limit, start delaying each // individual write by 1ms to reduce latency variance. 
Also, // this delay hands over some CPU to the compaction thread in // case it is sharing the same core as the writer. mutex_.Unlock(); env_->SleepForMicroseconds(1000); allow_delay = false; // Do not delay a single write more than once mutex_.Lock(); } else if (!force && (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) { // There is room in current memtable break; } else if (imm_ != nullptr) { // We have filled up the current memtable, but the previous // one is still being compacted, so we wait. Log(options_.info_log, "Current memtable full; waiting...\n"); background_work_finished_signal_.Wait(); } else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) { // There are too many level-0 files. Log(options_.info_log, "Too many L0 files; waiting...\n"); background_work_finished_signal_.Wait(); } else { // Attempt to switch to a new memtable and trigger compaction of old assert(versions_->PrevLogNumber() == 0); uint64_t new_log_number = versions_->NewFileNumber(); WritableFile* lfile = nullptr; s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile); if (!s.ok()) { // Avoid chewing through file number space in a tight loop. versions_->ReuseFileNumber(new_log_number); break; } delete log_; delete logfile_; logfile_ = lfile; logfile_number_ = new_log_number; log_ = new log::Writer(lfile); imm_ = mem_; has_imm_.store(true, std::memory_order_release); mem_ = new MemTable(internal_comparator_); mem_->Ref(); force = false; // Do not force another compaction if have room MaybeScheduleCompaction(); } } return s; } bool DBImpl::GetProperty(const Slice& property, std::string* value) { value->clear(); MutexLock l(&mutex_); Slice in = property; Slice prefix("leveldb."); if (!in.starts_with(prefix)) return false; in.remove_prefix(prefix.size()); if (in.starts_with("num-files-at-level")) { in.remove_prefix(strlen("num-files-at-level")); uint64_t level; bool ok = ConsumeDecimalNumber(&in, &level) && in.empty(); if (!ok || level >= config::kNumLevels) { return false; } else { char buf[100]; snprintf(buf, sizeof(buf), "%d", versions_->NumLevelFiles(static_cast<int>(level))); *value = buf; return true; } } else if (in == "stats") { char buf[200]; snprintf(buf, sizeof(buf), " Compactions\n" "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n" "--------------------------------------------------\n"); value->append(buf); for (int level = 0; level < config::kNumLevels; level++) { int files = versions_->NumLevelFiles(level); if (stats_[level].micros > 0 || files > 0) { snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level, files, versions_->NumLevelBytes(level) / 1048576.0, stats_[level].micros / 1e6, stats_[level].bytes_read / 1048576.0, stats_[level].bytes_written / 1048576.0); value->append(buf); } } return true; } else if (in == "sstables") { *value = versions_->current()->DebugString(); return true; } else if (in == "approximate-memory-usage") { size_t total_usage = options_.block_cache->TotalCharge(); if (mem_) { total_usage += mem_->ApproximateMemoryUsage(); } if (imm_) { total_usage += imm_->ApproximateMemoryUsage(); } char buf[50]; snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(total_usage)); value->append(buf); return true; } return false; } void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) { // TODO(opt): better implementation MutexLock l(&mutex_); Version* v = versions_->current(); v->Ref(); for (int i = 0; i < n; i++) { // Convert user_key into a corresponding internal key. 
InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek); InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek); uint64_t start = versions_->ApproximateOffsetOf(v, k1); uint64_t limit = versions_->ApproximateOffsetOf(v, k2); sizes[i] = (limit >= start ? limit - start : 0); } v->Unref(); } // Default implementations of convenience methods that subclasses of DB // can call if they wish Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) { WriteBatch batch; batch.Put(key, value); return Write(opt, &batch); } Status DB::Delete(const WriteOptions& opt, const Slice& key) { WriteBatch batch; batch.Delete(key); return Write(opt, &batch); } DB::~DB() = default; Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) { *dbptr = nullptr; DBImpl* impl = new DBImpl(options, dbname); impl->mutex_.Lock(); VersionEdit edit; // Recover handles create_if_missing, error_if_exists bool save_manifest = false; Status s = impl->Recover(&edit, &save_manifest); if (s.ok() && impl->mem_ == nullptr) { // Create new log and a corresponding memtable. uint64_t new_log_number = impl->versions_->NewFileNumber(); WritableFile* lfile; s = options.env->NewWritableFile(LogFileName(dbname, new_log_number), &lfile); if (s.ok()) { edit.SetLogNumber(new_log_number); impl->logfile_ = lfile; impl->logfile_number_ = new_log_number; impl->log_ = new log::Writer(lfile); impl->mem_ = new MemTable(impl->internal_comparator_); impl->mem_->Ref(); } } if (s.ok() && save_manifest) { edit.SetPrevLogNumber(0); // No older logs needed after recovery. edit.SetLogNumber(impl->logfile_number_); s = impl->versions_->LogAndApply(&edit, &impl->mutex_); } if (s.ok()) { impl->DeleteObsoleteFiles(); impl->MaybeScheduleCompaction(); } impl->mutex_.Unlock(); if (s.ok()) { assert(impl->mem_ != nullptr); *dbptr = impl; } else { delete impl; } return s; } Snapshot::~Snapshot() = default; Status DestroyDB(const std::string& dbname, const Options& options) { Env* env = options.env; std::vector<std::string> filenames; Status result = env->GetChildren(dbname, &filenames); if (!result.ok()) { // Ignore error in case directory does not exist return Status::OK(); } FileLock* lock; const std::string lockname = LockFileName(dbname); result = env->LockFile(lockname, &lock); if (result.ok()) { uint64_t number; FileType type; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type != kDBLockFile) { // Lock file will be deleted at end Status del = env->DeleteFile(dbname + "/" + filenames[i]); if (result.ok() && !del.ok()) { result = del; } } } env->UnlockFile(lock); // Ignore error since state is already gone env->DeleteFile(lockname); env->DeleteDir(dbname); // Ignore error in case dir contains other files } return result; } } // namespace leveldb
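// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file): a minimal sketch of
// driving the DBImpl code paths above through the public DB interface. The
// database path and the function name are hypothetical, and error handling
// is deliberately terse.
void ExampleDbUsageSketch() {
  leveldb::Options options;
  options.create_if_missing = true;  // Recover() will call NewDB() if needed.
  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/db_impl_sketch", &db);
  if (!s.ok()) return;
  // DB::Put wraps the update in a WriteBatch and funnels it through
  // DBImpl::Write(), which may group-commit with concurrent writers.
  s = db->Put(leveldb::WriteOptions(), "key", "value");
  std::string value;
  if (s.ok()) {
    // DBImpl::Get() consults mem_, then imm_, then the current Version.
    s = db->Get(leveldb::ReadOptions(), "key", &value);
  }
  delete db;  // ~DBImpl blocks until scheduled background work finishes.
}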
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/filename.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/filename.h" #include <ctype.h> #include <stdio.h> #include "db/dbformat.h" #include "leveldb/env.h" #include "util/logging.h" namespace leveldb { // A utility routine: write "data" to the named file and Sync() it. Status WriteStringToFileSync(Env* env, const Slice& data, const std::string& fname); static std::string MakeFileName(const std::string& dbname, uint64_t number, const char* suffix) { char buf[100]; snprintf(buf, sizeof(buf), "/%06llu.%s", static_cast<unsigned long long>(number), suffix); return dbname + buf; } std::string LogFileName(const std::string& dbname, uint64_t number) { assert(number > 0); return MakeFileName(dbname, number, "log"); } std::string TableFileName(const std::string& dbname, uint64_t number) { assert(number > 0); return MakeFileName(dbname, number, "ldb"); } std::string SSTTableFileName(const std::string& dbname, uint64_t number) { assert(number > 0); return MakeFileName(dbname, number, "sst"); } std::string DescriptorFileName(const std::string& dbname, uint64_t number) { assert(number > 0); char buf[100]; snprintf(buf, sizeof(buf), "/MANIFEST-%06llu", static_cast<unsigned long long>(number)); return dbname + buf; } std::string CurrentFileName(const std::string& dbname) { return dbname + "/CURRENT"; } std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; } std::string TempFileName(const std::string& dbname, uint64_t number) { assert(number > 0); return MakeFileName(dbname, number, "dbtmp"); } std::string InfoLogFileName(const std::string& dbname) { return dbname + "/LOG"; } // Return the name of the old info log file for "dbname". 
std::string OldInfoLogFileName(const std::string& dbname) { return dbname + "/LOG.old"; } // Owned filenames have the form: // dbname/CURRENT // dbname/LOCK // dbname/LOG // dbname/LOG.old // dbname/MANIFEST-[0-9]+ // dbname/[0-9]+.(log|sst|ldb) bool ParseFileName(const std::string& filename, uint64_t* number, FileType* type) { Slice rest(filename); if (rest == "CURRENT") { *number = 0; *type = kCurrentFile; } else if (rest == "LOCK") { *number = 0; *type = kDBLockFile; } else if (rest == "LOG" || rest == "LOG.old") { *number = 0; *type = kInfoLogFile; } else if (rest.starts_with("MANIFEST-")) { rest.remove_prefix(strlen("MANIFEST-")); uint64_t num; if (!ConsumeDecimalNumber(&rest, &num)) { return false; } if (!rest.empty()) { return false; } *type = kDescriptorFile; *number = num; } else { // Avoid strtoull() to keep filename format independent of the // current locale uint64_t num; if (!ConsumeDecimalNumber(&rest, &num)) { return false; } Slice suffix = rest; if (suffix == Slice(".log")) { *type = kLogFile; } else if (suffix == Slice(".sst") || suffix == Slice(".ldb")) { *type = kTableFile; } else if (suffix == Slice(".dbtmp")) { *type = kTempFile; } else { return false; } *number = num; } return true; } Status SetCurrentFile(Env* env, const std::string& dbname, uint64_t descriptor_number) { // Remove leading "dbname/" and add newline to manifest file name std::string manifest = DescriptorFileName(dbname, descriptor_number); Slice contents = manifest; assert(contents.starts_with(dbname + "/")); contents.remove_prefix(dbname.size() + 1); std::string tmp = TempFileName(dbname, descriptor_number); Status s = WriteStringToFileSync(env, contents.ToString() + "\n", tmp); if (s.ok()) { s = env->RenameFile(tmp, CurrentFileName(dbname)); } if (!s.ok()) { env->DeleteFile(tmp); } return s; } } // namespace leveldb
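// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file): a small sketch of
// the file-naming round trip implemented above. The db name and function
// name are hypothetical.
void ExampleFileNameRoundTrip() {
  // TableFileName() yields "/tmp/testdb/000007.ldb".
  const std::string name = leveldb::TableFileName("/tmp/testdb", 7);
  uint64_t number = 0;
  leveldb::FileType type;
  // ParseFileName() expects a bare filename, not a full path.
  bool ok = leveldb::ParseFileName("000007.ldb", &number, &type);
  assert(ok && number == 7 && type == leveldb::kTableFile);
  (void)name;
  (void)ok;
}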
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/fault_injection_test.cc
// Copyright 2014 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // This test uses a custom Env to keep track of the state of a filesystem as of // the last "sync". It then checks for data loss errors by purposely dropping // file data (or entire files) not protected by a "sync". #include <map> #include <set> #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" #include "db/version_set.h" #include "leveldb/cache.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "leveldb/table.h" #include "leveldb/write_batch.h" #include "port/port.h" #include "port/thread_annotations.h" #include "util/logging.h" #include "util/mutexlock.h" #include "util/testharness.h" #include "util/testutil.h" namespace leveldb { static const int kValueSize = 1000; static const int kMaxNumValues = 2000; static const size_t kNumIterations = 3; class FaultInjectionTestEnv; namespace { // Assume a filename, and not a directory name like "/foo/bar/" static std::string GetDirName(const std::string& filename) { size_t found = filename.find_last_of("/\\"); if (found == std::string::npos) { return ""; } else { return filename.substr(0, found); } } Status SyncDir(const std::string& dir) { // As this is a test it isn't required to *actually* sync this directory. return Status::OK(); } // A basic file truncation function suitable for this test. Status Truncate(const std::string& filename, uint64_t length) { leveldb::Env* env = leveldb::Env::Default(); SequentialFile* orig_file; Status s = env->NewSequentialFile(filename, &orig_file); if (!s.ok()) return s; char* scratch = new char[length]; leveldb::Slice result; s = orig_file->Read(length, &result, scratch); delete orig_file; if (s.ok()) { std::string tmp_name = GetDirName(filename) + "/truncate.tmp"; WritableFile* tmp_file; s = env->NewWritableFile(tmp_name, &tmp_file); if (s.ok()) { s = tmp_file->Append(result); delete tmp_file; if (s.ok()) { s = env->RenameFile(tmp_name, filename); } else { env->DeleteFile(tmp_name); } } } delete[] scratch; return s; } struct FileState { std::string filename_; int64_t pos_; int64_t pos_at_last_sync_; int64_t pos_at_last_flush_; FileState(const std::string& filename) : filename_(filename), pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {} FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {} bool IsFullySynced() const { return pos_ <= 0 || pos_ == pos_at_last_sync_; } Status DropUnsyncedData() const; }; } // anonymous namespace // A wrapper around WritableFile which informs another Env whenever this file // is written to or sync'ed. 
class TestWritableFile : public WritableFile { public: TestWritableFile(const FileState& state, WritableFile* f, FaultInjectionTestEnv* env); ~TestWritableFile() override; Status Append(const Slice& data) override; Status Close() override; Status Flush() override; Status Sync() override; std::string GetName() const override { return ""; } private: FileState state_; WritableFile* target_; bool writable_file_opened_; FaultInjectionTestEnv* env_; Status SyncParent(); }; class FaultInjectionTestEnv : public EnvWrapper { public: FaultInjectionTestEnv() : EnvWrapper(Env::Default()), filesystem_active_(true) {} ~FaultInjectionTestEnv() override = default; Status NewWritableFile(const std::string& fname, WritableFile** result) override; Status NewAppendableFile(const std::string& fname, WritableFile** result) override; Status DeleteFile(const std::string& f) override; Status RenameFile(const std::string& s, const std::string& t) override; void WritableFileClosed(const FileState& state); Status DropUnsyncedFileData(); Status DeleteFilesCreatedAfterLastDirSync(); void DirWasSynced(); bool IsFileCreatedSinceLastDirSync(const std::string& filename); void ResetState(); void UntrackFile(const std::string& f); // Setting the filesystem to inactive is the test equivalent to simulating a // system reset. Setting to inactive will freeze our saved filesystem state so // that it will stop being recorded. It can then be reset back to the state at // the time of the reset. bool IsFilesystemActive() LOCKS_EXCLUDED(mutex_) { MutexLock l(&mutex_); return filesystem_active_; } void SetFilesystemActive(bool active) LOCKS_EXCLUDED(mutex_) { MutexLock l(&mutex_); filesystem_active_ = active; } private: port::Mutex mutex_; std::map<std::string, FileState> db_file_state_ GUARDED_BY(mutex_); std::set<std::string> new_files_since_last_dir_sync_ GUARDED_BY(mutex_); bool filesystem_active_ GUARDED_BY(mutex_); // Record flushes, syncs, writes }; TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f, FaultInjectionTestEnv* env) : state_(state), target_(f), writable_file_opened_(true), env_(env) { assert(f != nullptr); } TestWritableFile::~TestWritableFile() { if (writable_file_opened_) { Close(); } delete target_; } Status TestWritableFile::Append(const Slice& data) { Status s = target_->Append(data); if (s.ok() && env_->IsFilesystemActive()) { state_.pos_ += data.size(); } return s; } Status TestWritableFile::Close() { writable_file_opened_ = false; Status s = target_->Close(); if (s.ok()) { env_->WritableFileClosed(state_); } return s; } Status TestWritableFile::Flush() { Status s = target_->Flush(); if (s.ok() && env_->IsFilesystemActive()) { state_.pos_at_last_flush_ = state_.pos_; } return s; } Status TestWritableFile::SyncParent() { Status s = SyncDir(GetDirName(state_.filename_)); if (s.ok()) { env_->DirWasSynced(); } return s; } Status TestWritableFile::Sync() { if (!env_->IsFilesystemActive()) { return Status::OK(); } // Ensure new files referred to by the manifest are in the filesystem. 
Status s = target_->Sync(); if (s.ok()) { state_.pos_at_last_sync_ = state_.pos_; } if (env_->IsFileCreatedSinceLastDirSync(state_.filename_)) { Status ps = SyncParent(); if (s.ok() && !ps.ok()) { s = ps; } } return s; } Status FaultInjectionTestEnv::NewWritableFile(const std::string& fname, WritableFile** result) { WritableFile* actual_writable_file; Status s = target()->NewWritableFile(fname, &actual_writable_file); if (s.ok()) { FileState state(fname); state.pos_ = 0; *result = new TestWritableFile(state, actual_writable_file, this); // NewWritableFile doesn't append to files, so if the same file is // opened again then it will be truncated - so forget our saved // state. UntrackFile(fname); MutexLock l(&mutex_); new_files_since_last_dir_sync_.insert(fname); } return s; } Status FaultInjectionTestEnv::NewAppendableFile(const std::string& fname, WritableFile** result) { WritableFile* actual_writable_file; Status s = target()->NewAppendableFile(fname, &actual_writable_file); if (s.ok()) { FileState state(fname); state.pos_ = 0; { MutexLock l(&mutex_); if (db_file_state_.count(fname) == 0) { new_files_since_last_dir_sync_.insert(fname); } else { state = db_file_state_[fname]; } } *result = new TestWritableFile(state, actual_writable_file, this); } return s; } Status FaultInjectionTestEnv::DropUnsyncedFileData() { Status s; MutexLock l(&mutex_); for (const auto& kvp : db_file_state_) { if (!s.ok()) { break; } const FileState& state = kvp.second; if (!state.IsFullySynced()) { s = state.DropUnsyncedData(); } } return s; } void FaultInjectionTestEnv::DirWasSynced() { MutexLock l(&mutex_); new_files_since_last_dir_sync_.clear(); } bool FaultInjectionTestEnv::IsFileCreatedSinceLastDirSync( const std::string& filename) { MutexLock l(&mutex_); return new_files_since_last_dir_sync_.find(filename) != new_files_since_last_dir_sync_.end(); } void FaultInjectionTestEnv::UntrackFile(const std::string& f) { MutexLock l(&mutex_); db_file_state_.erase(f); new_files_since_last_dir_sync_.erase(f); } Status FaultInjectionTestEnv::DeleteFile(const std::string& f) { Status s = EnvWrapper::DeleteFile(f); ASSERT_OK(s); if (s.ok()) { UntrackFile(f); } return s; } Status FaultInjectionTestEnv::RenameFile(const std::string& s, const std::string& t) { Status ret = EnvWrapper::RenameFile(s, t); if (ret.ok()) { MutexLock l(&mutex_); if (db_file_state_.find(s) != db_file_state_.end()) { db_file_state_[t] = db_file_state_[s]; db_file_state_.erase(s); } if (new_files_since_last_dir_sync_.erase(s) != 0) { assert(new_files_since_last_dir_sync_.find(t) == new_files_since_last_dir_sync_.end()); new_files_since_last_dir_sync_.insert(t); } } return ret; } void FaultInjectionTestEnv::ResetState() { // Since we are not destroying the database, the existing files // should keep their recorded synced/flushed state. Therefore // we do not reset db_file_state_ and new_files_since_last_dir_sync_. 
SetFilesystemActive(true); } Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() { // Because DeleteFile accesses this container, make a copy to avoid deadlock mutex_.Lock(); std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(), new_files_since_last_dir_sync_.end()); mutex_.Unlock(); Status status; for (const auto& new_file : new_files) { Status delete_status = DeleteFile(new_file); if (!delete_status.ok() && status.ok()) { status = std::move(delete_status); } } return status; } void FaultInjectionTestEnv::WritableFileClosed(const FileState& state) { MutexLock l(&mutex_); db_file_state_[state.filename_] = state; } Status FileState::DropUnsyncedData() const { int64_t sync_pos = pos_at_last_sync_ == -1 ? 0 : pos_at_last_sync_; return Truncate(filename_, sync_pos); } class FaultInjectionTest { public: enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR }; enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES }; FaultInjectionTestEnv* env_; std::string dbname_; Cache* tiny_cache_; Options options_; DB* db_; FaultInjectionTest() : env_(new FaultInjectionTestEnv), tiny_cache_(NewLRUCache(100)), db_(nullptr) { dbname_ = test::TmpDir() + "/fault_test"; DestroyDB(dbname_, Options()); // Destroy any db from earlier run options_.reuse_logs = true; options_.env = env_; options_.paranoid_checks = true; options_.block_cache = tiny_cache_; options_.create_if_missing = true; } ~FaultInjectionTest() { CloseDB(); DestroyDB(dbname_, Options()); delete tiny_cache_; delete env_; } void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; } void Build(int start_idx, int num_vals) { std::string key_space, value_space; WriteBatch batch; for (int i = start_idx; i < start_idx + num_vals; i++) { Slice key = Key(i, &key_space); batch.Clear(); batch.Put(key, Value(i, &value_space)); WriteOptions options; ASSERT_OK(db_->Write(options, &batch)); } } Status ReadValue(int i, std::string* val) const { std::string key_space, value_space; Slice key = Key(i, &key_space); Value(i, &value_space); ReadOptions options; return db_->Get(options, key, val); } Status Verify(int start_idx, int num_vals, ExpectedVerifResult expected) const { std::string val; std::string value_space; Status s; for (int i = start_idx; i < start_idx + num_vals && s.ok(); i++) { Value(i, &value_space); s = ReadValue(i, &val); if (expected == VAL_EXPECT_NO_ERROR) { if (s.ok()) { ASSERT_EQ(value_space, val); } } else if (s.ok()) { fprintf(stderr, "Expected an error at %d, but was OK\n", i); s = Status::IOError(dbname_, "Expected value error:"); } else { s = Status::OK(); // An expected error } } return s; } // Return the ith key Slice Key(int i, std::string* storage) const { char buf[100]; snprintf(buf, sizeof(buf), "%016d", i); storage->assign(buf, strlen(buf)); return Slice(*storage); } // Return the value to associate with the specified key Slice Value(int k, std::string* storage) const { Random r(k); return test::RandomString(&r, kValueSize, storage); } Status OpenDB() { delete db_; db_ = nullptr; env_->ResetState(); return DB::Open(options_, dbname_, &db_); } void CloseDB() { delete db_; db_ = nullptr; } void DeleteAllData() { Iterator* iter = db_->NewIterator(ReadOptions()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ASSERT_OK(db_->Delete(WriteOptions(), iter->key())); } delete iter; } void ResetDBState(ResetMethod reset_method) { switch (reset_method) { case RESET_DROP_UNSYNCED_DATA: ASSERT_OK(env_->DropUnsyncedFileData()); break; case RESET_DELETE_UNSYNCED_FILES: 
ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync()); break; default: assert(false); } } void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) { DeleteAllData(); Build(0, num_pre_sync); db_->CompactRange(nullptr, nullptr); Build(num_pre_sync, num_post_sync); } void PartialCompactTestReopenWithFault(ResetMethod reset_method, int num_pre_sync, int num_post_sync) { env_->SetFilesystemActive(false); CloseDB(); ResetDBState(reset_method); ASSERT_OK(OpenDB()); ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR)); ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR)); } void NoWriteTestPreFault() {} void NoWriteTestReopenWithFault(ResetMethod reset_method) { CloseDB(); ResetDBState(reset_method); ASSERT_OK(OpenDB()); } void DoTest() { Random rnd(0); ASSERT_OK(OpenDB()); for (size_t idx = 0; idx < kNumIterations; idx++) { int num_pre_sync = rnd.Uniform(kMaxNumValues); int num_post_sync = rnd.Uniform(kMaxNumValues); PartialCompactTestPreFault(num_pre_sync, num_post_sync); PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync, num_post_sync); NoWriteTestPreFault(); NoWriteTestReopenWithFault(RESET_DROP_UNSYNCED_DATA); PartialCompactTestPreFault(num_pre_sync, num_post_sync); // No new files created so we expect all values since no files will be // dropped. PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES, num_pre_sync + num_post_sync, 0); NoWriteTestPreFault(); NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES); } } }; TEST(FaultInjectionTest, FaultTestNoLogReuse) { ReuseLogs(false); DoTest(); } TEST(FaultInjectionTest, FaultTestWithLogReuse) { ReuseLogs(true); DoTest(); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
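// ---------------------------------------------------------------------------
// Illustrative addendum (not part of the original file): the shape of one
// DoTest() iteration above, restated as steps.
//   1. Build(0, num_pre_sync) writes values, and CompactRange() flushes them
//      into table files, which are Sync()ed as they are produced.
//   2. Build(num_pre_sync, num_post_sync) writes values that may exist only
//      in the unsynced tail of the log file.
//   3. SetFilesystemActive(false) freezes the recorded filesystem state, the
//      DB is closed, and ResetDBState() either truncates unsynced data or
//      deletes files created since the last directory sync.
//   4. After reopening, Verify() expects the pre-sync range to be intact and
//      the post-sync range to be absent.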
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/skiplist.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
#define STORAGE_LEVELDB_DB_SKIPLIST_H_

// Thread safety
// -------------
//
// Writes require external synchronization, most likely a mutex.
// Reads require a guarantee that the SkipList will not be destroyed
// while the read is in progress.  Apart from that, reads progress
// without any internal locking or synchronization.
//
// Invariants:
//
// (1) Allocated nodes are never deleted until the SkipList is
// destroyed.  This is trivially guaranteed by the code since we
// never delete any skip list nodes.
//
// (2) The contents of a Node except for the next/prev pointers are
// immutable after the Node has been linked into the SkipList.
// Only Insert() modifies the list, and it is careful to initialize
// a node and use release-stores to publish the nodes in one or
// more lists.
//
// ... prev vs. next pointer ordering ...

#include <atomic>
#include <cassert>
#include <cstdlib>

#include "util/arena.h"
#include "util/random.h"

namespace leveldb {

class Arena;

template <typename Key, class Comparator>
class SkipList {
 private:
  struct Node;

 public:
  // Create a new SkipList object that will use "cmp" for comparing keys,
  // and will allocate memory using "*arena".  Objects allocated in the arena
  // must remain allocated for the lifetime of the skiplist object.
  explicit SkipList(Comparator cmp, Arena* arena);

  SkipList(const SkipList&) = delete;
  SkipList& operator=(const SkipList&) = delete;

  // Insert key into the list.
  // REQUIRES: nothing that compares equal to key is currently in the list.
  void Insert(const Key& key);

  // Returns true iff an entry that compares equal to key is in the list.
  bool Contains(const Key& key) const;

  // Iteration over the contents of a skip list
  class Iterator {
   public:
    // Initialize an iterator over the specified list.
    // The returned iterator is not valid.
    explicit Iterator(const SkipList* list);

    // Returns true iff the iterator is positioned at a valid node.
    bool Valid() const;

    // Returns the key at the current position.
    // REQUIRES: Valid()
    const Key& key() const;

    // Advances to the next position.
    // REQUIRES: Valid()
    void Next();

    // Advances to the previous position.
    // REQUIRES: Valid()
    void Prev();

    // Advance to the first entry with a key >= target
    void Seek(const Key& target);

    // Position at the first entry in list.
    // Final state of iterator is Valid() iff list is not empty.
    void SeekToFirst();

    // Position at the last entry in list.
    // Final state of iterator is Valid() iff list is not empty.
    void SeekToLast();

   private:
    const SkipList* list_;
    Node* node_;
    // Intentionally copyable
  };

 private:
  enum { kMaxHeight = 12 };

  inline int GetMaxHeight() const {
    return max_height_.load(std::memory_order_relaxed);
  }

  Node* NewNode(const Key& key, int height);
  int RandomHeight();
  bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }

  // Return true if key is greater than the data stored in "n"
  bool KeyIsAfterNode(const Key& key, Node* n) const;

  // Return the earliest node that comes at or after key.
  // Return nullptr if there is no such node.
  //
  // If prev is non-null, fills prev[level] with pointer to previous
  // node at "level" for every level in [0..max_height_-1].
  Node* FindGreaterOrEqual(const Key& key, Node** prev) const;

  // Return the latest node with a key < key.
  // Return head_ if there is no such node.
  Node* FindLessThan(const Key& key) const;

  // Return the last node in the list.
  // Return head_ if list is empty.
  Node* FindLast() const;

  // Immutable after construction
  Comparator const compare_;
  Arena* const arena_;  // Arena used for allocations of nodes

  Node* const head_;

  // Modified only by Insert().  Read racily by readers, but stale
  // values are ok.
  std::atomic<int> max_height_;  // Height of the entire list

  // Read/written only by Insert().
  Random rnd_;
};

// Implementation details follow
template <typename Key, class Comparator>
struct SkipList<Key, Comparator>::Node {
  explicit Node(const Key& k) : key(k) {}

  Key const key;

  // Accessors/mutators for links.  Wrapped in methods so we can
  // add the appropriate barriers as necessary.
  Node* Next(int n) {
    assert(n >= 0);
    // Use an 'acquire load' so that we observe a fully initialized
    // version of the returned Node.
    return next_[n].load(std::memory_order_acquire);
  }
  void SetNext(int n, Node* x) {
    assert(n >= 0);
    // Use a 'release store' so that anybody who reads through this
    // pointer observes a fully initialized version of the inserted node.
    next_[n].store(x, std::memory_order_release);
  }

  // No-barrier variants that can be safely used in a few locations.
  Node* NoBarrier_Next(int n) {
    assert(n >= 0);
    return next_[n].load(std::memory_order_relaxed);
  }
  void NoBarrier_SetNext(int n, Node* x) {
    assert(n >= 0);
    next_[n].store(x, std::memory_order_relaxed);
  }

 private:
  // Array of length equal to the node height.  next_[0] is lowest level link.
  std::atomic<Node*> next_[1];
};

template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
    const Key& key, int height) {
  char* const node_memory = arena_->AllocateAligned(
      sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
  return new (node_memory) Node(key);
}

template <typename Key, class Comparator>
inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
  list_ = list;
  node_ = nullptr;
}

template <typename Key, class Comparator>
inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
  return node_ != nullptr;
}

template <typename Key, class Comparator>
inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
  assert(Valid());
  return node_->key;
}

template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Next() {
  assert(Valid());
  node_ = node_->Next(0);
}

template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Prev() {
  // Instead of using explicit "prev" links, we just search for the
  // last node that falls before key.
  assert(Valid());
  node_ = list_->FindLessThan(node_->key);
  if (node_ == list_->head_) {
    node_ = nullptr;
  }
}

template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
  node_ = list_->FindGreaterOrEqual(target, nullptr);
}

template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
  node_ = list_->head_->Next(0);
}

template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
  node_ = list_->FindLast();
  if (node_ == list_->head_) {
    node_ = nullptr;
  }
}

template <typename Key, class Comparator>
int SkipList<Key, Comparator>::RandomHeight() {
  // Increase height with probability 1 in kBranching
  static const unsigned int kBranching = 4;
  int height = 1;
  while (height < kMaxHeight && ((rnd_.Next() % kBranching) == 0)) {
    height++;
  }
  assert(height > 0);
  assert(height <= kMaxHeight);
  return height;
}

template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
  // null n is considered infinite
  return (n != nullptr) && (compare_(n->key, key) < 0);
}

template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
                                              Node** prev) const {
  Node* x = head_;
  int level = GetMaxHeight() - 1;
  while (true) {
    Node* next = x->Next(level);
    if (KeyIsAfterNode(key, next)) {
      // Keep searching in this list
      x = next;
    } else {
      if (prev != nullptr) prev[level] = x;
      if (level == 0) {
        return next;
      } else {
        // Switch to next list
        level--;
      }
    }
  }
}

template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
  Node* x = head_;
  int level = GetMaxHeight() - 1;
  while (true) {
    assert(x == head_ || compare_(x->key, key) < 0);
    Node* next = x->Next(level);
    if (next == nullptr || compare_(next->key, key) >= 0) {
      if (level == 0) {
        return x;
      } else {
        // Switch to next list
        level--;
      }
    } else {
      x = next;
    }
  }
}

template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
    const {
  Node* x = head_;
  int level = GetMaxHeight() - 1;
  while (true) {
    Node* next = x->Next(level);
    if (next == nullptr) {
      if (level == 0) {
        return x;
      } else {
        // Switch to next list
        level--;
      }
    } else {
      x = next;
    }
  }
}

template <typename Key, class Comparator>
SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
    : compare_(cmp),
      arena_(arena),
      head_(NewNode(0 /* any key will do */, kMaxHeight)),
      max_height_(1),
      rnd_(0xdeadbeef) {
  for (int i = 0; i < kMaxHeight; i++) {
    head_->SetNext(i, nullptr);
  }
}

template <typename Key, class Comparator>
void SkipList<Key, Comparator>::Insert(const Key& key) {
  // TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
  // here since Insert() is externally synchronized.
  Node* prev[kMaxHeight];
  Node* x = FindGreaterOrEqual(key, prev);

  // Our data structure does not allow duplicate insertion
  assert(x == nullptr || !Equal(key, x->key));

  int height = RandomHeight();
  if (height > GetMaxHeight()) {
    for (int i = GetMaxHeight(); i < height; i++) {
      prev[i] = head_;
    }
    // It is ok to mutate max_height_ without any synchronization
    // with concurrent readers.  A concurrent reader that observes
    // the new value of max_height_ will see either the old value of
    // new level pointers from head_ (nullptr), or a new value set in
    // the loop below.  In the former case the reader will
    // immediately drop to the next level since nullptr sorts after all
    // keys.  In the latter case the reader will use the new node.
    max_height_.store(height, std::memory_order_relaxed);
  }

  x = NewNode(key, height);
  for (int i = 0; i < height; i++) {
    // NoBarrier_SetNext() suffices since we will add a barrier when
    // we publish a pointer to "x" in prev[i].
    x->NoBarrier_SetNext(i, prev[i]->NoBarrier_Next(i));
    prev[i]->SetNext(i, x);
  }
}

template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::Contains(const Key& key) const {
  Node* x = FindGreaterOrEqual(key, nullptr);
  if (x != nullptr && Equal(key, x->key)) {
    return true;
  } else {
    return false;
  }
}

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_SKIPLIST_H_
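A minimal usage sketch for the skip list above. The integer comparator and the driver are hypothetical (not part of the tree), the internal headers db/skiplist.h and util/arena.h are assumed to be on the include path, and a single thread is assumed since writes require external synchronization:

// Sketch only: exercises SkipList with a hypothetical integer comparator.
#include <cstdio>
#include "db/skiplist.h"
#include "util/arena.h"

struct IntComparator {  // hypothetical comparator, not in the source tree
  int operator()(const int& a, const int& b) const {
    if (a < b) return -1;
    if (a > b) return +1;
    return 0;
  }
};

int main() {
  leveldb::Arena arena;  // nodes live exactly as long as the arena
  leveldb::SkipList<int, IntComparator> list(IntComparator(), &arena);
  list.Insert(3);  // keys must be distinct; single writer here
  list.Insert(1);
  list.Insert(7);
  leveldb::SkipList<int, IntComparator>::Iterator it(&list);
  for (it.SeekToFirst(); it.Valid(); it.Next()) {
    std::printf("%d\n", it.key());  // prints 1, 3, 7 in sorted order
  }
  return list.Contains(7) ? 0 : 1;
}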
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/dbformat.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_DB_DBFORMAT_H_
#define STORAGE_LEVELDB_DB_DBFORMAT_H_

#include <cstddef>
#include <cstdint>
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
#include "leveldb/slice.h"
#include "leveldb/table_builder.h"
#include "util/coding.h"
#include "util/logging.h"

namespace leveldb {

// Grouping of constants.  We may want to make some of these
// parameters set via options.
namespace config {
static const int kNumLevels = 7;

// Level-0 compaction is started when we hit this many files.
static const int kL0_CompactionTrigger = 4;

// Soft limit on number of level-0 files.  We slow down writes at this point.
static const int kL0_SlowdownWritesTrigger = 8;

// Maximum number of level-0 files.  We stop writes at this point.
static const int kL0_StopWritesTrigger = 12;

// Maximum level to which a new compacted memtable is pushed if it
// does not create overlap.  We try to push to level 2 to avoid the
// relatively expensive level 0=>1 compactions and to avoid some
// expensive manifest file operations.  We do not push all the way to
// the largest level since that can generate a lot of wasted disk
// space if the same key space is being repeatedly overwritten.
static const int kMaxMemCompactLevel = 2;

// Approximate gap in bytes between samples of data read during iteration.
static const int kReadBytesPeriod = 1048576;

}  // namespace config

class InternalKey;

// Value types encoded as the last component of internal keys.
// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
// data structures.
enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
// kValueTypeForSeek defines the ValueType that should be passed when
// constructing a ParsedInternalKey object for seeking to a particular
// sequence number (since we sort sequence numbers in decreasing order
// and the value type is embedded as the low 8 bits in the sequence
// number in internal keys, we need to use the highest-numbered
// ValueType, not the lowest).
static const ValueType kValueTypeForSeek = kTypeValue;

typedef uint64_t SequenceNumber;

// We leave eight bits empty at the bottom so a type and sequence#
// can be packed together into 64-bits.
static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);

struct ParsedInternalKey {
  Slice user_key;
  SequenceNumber sequence;
  ValueType type;

  ParsedInternalKey() {}  // Intentionally left uninitialized (for speed)
  ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
      : user_key(u), sequence(seq), type(t) {}
  std::string DebugString() const;
};

// Return the length of the encoding of "key".
inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
  return key.user_key.size() + 8;
}

// Append the serialization of "key" to *result.
void AppendInternalKey(std::string* result, const ParsedInternalKey& key);

// Attempt to parse an internal key from "internal_key".  On success,
// stores the parsed data in "*result", and returns true.
//
// On error, returns false, leaves "*result" in an undefined state.
bool ParseInternalKey(const Slice& internal_key, ParsedInternalKey* result);

// Returns the user key portion of an internal key.
inline Slice ExtractUserKey(const Slice& internal_key) {
  assert(internal_key.size() >= 8);
  return Slice(internal_key.data(), internal_key.size() - 8);
}

// A comparator for internal keys that uses a specified comparator for
// the user key portion and breaks ties by decreasing sequence number.
class InternalKeyComparator : public Comparator {
 private:
  const Comparator* user_comparator_;

 public:
  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
  const char* Name() const override;
  int Compare(const Slice& a, const Slice& b) const override;
  void FindShortestSeparator(std::string* start,
                             const Slice& limit) const override;
  void FindShortSuccessor(std::string* key) const override;

  const Comparator* user_comparator() const { return user_comparator_; }

  int Compare(const InternalKey& a, const InternalKey& b) const;
};

// Filter policy wrapper that converts from internal keys to user keys
class InternalFilterPolicy : public FilterPolicy {
 private:
  const FilterPolicy* const user_policy_;

 public:
  explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
  const char* Name() const override;
  void CreateFilter(const Slice* keys, int n, std::string* dst) const override;
  bool KeyMayMatch(const Slice& key, const Slice& filter) const override;
};

// Modules in this directory should keep internal keys wrapped inside
// the following class instead of plain strings so that we do not
// incorrectly use string comparisons instead of an InternalKeyComparator.
class InternalKey {
 private:
  std::string rep_;

 public:
  InternalKey() {}  // Leave rep_ as empty to indicate it is invalid
  InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
    AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
  }

  bool DecodeFrom(const Slice& s) {
    rep_.assign(s.data(), s.size());
    return !rep_.empty();
  }

  Slice Encode() const {
    assert(!rep_.empty());
    return rep_;
  }

  Slice user_key() const { return ExtractUserKey(rep_); }

  void SetFrom(const ParsedInternalKey& p) {
    rep_.clear();
    AppendInternalKey(&rep_, p);
  }

  void Clear() { rep_.clear(); }

  std::string DebugString() const;
};

inline int InternalKeyComparator::Compare(const InternalKey& a,
                                          const InternalKey& b) const {
  return Compare(a.Encode(), b.Encode());
}

inline bool ParseInternalKey(const Slice& internal_key,
                             ParsedInternalKey* result) {
  const size_t n = internal_key.size();
  if (n < 8) return false;
  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
  uint8_t c = num & 0xff;
  result->sequence = num >> 8;
  result->type = static_cast<ValueType>(c);
  result->user_key = Slice(internal_key.data(), n - 8);
  return (c <= static_cast<uint8_t>(kTypeValue));
}

// A helper class useful for DBImpl::Get()
class LookupKey {
 public:
  // Initialize *this for looking up user_key at a snapshot with
  // the specified sequence number.
  LookupKey(const Slice& user_key, SequenceNumber sequence);

  LookupKey(const LookupKey&) = delete;
  LookupKey& operator=(const LookupKey&) = delete;

  ~LookupKey();

  // Return a key suitable for lookup in a MemTable.
  Slice memtable_key() const { return Slice(start_, end_ - start_); }

  // Return an internal key (suitable for passing to an internal iterator)
  Slice internal_key() const { return Slice(kstart_, end_ - kstart_); }

  // Return the user key
  Slice user_key() const { return Slice(kstart_, end_ - kstart_ - 8); }

 private:
  // We construct a char array of the form:
  //    klength  varint32               <-- start_
  //    userkey  char[klength]          <-- kstart_
  //    tag      uint64
  //                                    <-- end_
  // The array is a suitable MemTable key.
  // The suffix starting with "userkey" can be used as an InternalKey.
  const char* start_;
  const char* kstart_;
  const char* end_;
  char space_[200];  // Avoid allocation for short keys
};

inline LookupKey::~LookupKey() {
  if (start_ != space_) delete[] start_;
}

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_DBFORMAT_H_
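A small worked example of the internal-key layout documented above: the user key bytes followed by fixed64(sequence << 8 | type). This sketch hand-rolls the little-endian byte layout that EncodeFixed64 produces instead of calling into the tree:

// Sketch: internal key = user_key + 8-byte tag (sequence << 8 | type).
#include <cstdint>
#include <string>

int main() {
  const std::string user_key = "foo";
  const uint64_t sequence = 5;   // must be <= kMaxSequenceNumber (2^56 - 1)
  const uint8_t type = 0x1;      // kTypeValue
  const uint64_t tag = (sequence << 8) | type;

  std::string internal_key = user_key;
  for (int i = 0; i < 8; i++) {  // little-endian fixed64 layout
    internal_key.push_back(static_cast<char>((tag >> (8 * i)) & 0xff));
  }
  // Matches InternalKeyEncodingLength(): user key length plus 8 tag bytes.
  return internal_key.size() == user_key.size() + 8 ? 0 : 1;
}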
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/table_cache.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/table_cache.h"

#include "db/filename.h"
#include "leveldb/env.h"
#include "leveldb/table.h"
#include "util/coding.h"

namespace leveldb {

struct TableAndFile {
  RandomAccessFile* file;
  Table* table;
};

static void DeleteEntry(const Slice& key, void* value) {
  TableAndFile* tf = reinterpret_cast<TableAndFile*>(value);
  delete tf->table;
  delete tf->file;
  delete tf;
}

static void UnrefEntry(void* arg1, void* arg2) {
  Cache* cache = reinterpret_cast<Cache*>(arg1);
  Cache::Handle* h = reinterpret_cast<Cache::Handle*>(arg2);
  cache->Release(h);
}

TableCache::TableCache(const std::string& dbname, const Options& options,
                       int entries)
    : env_(options.env),
      dbname_(dbname),
      options_(options),
      cache_(NewLRUCache(entries)) {}

TableCache::~TableCache() { delete cache_; }

Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
                             Cache::Handle** handle) {
  Status s;
  char buf[sizeof(file_number)];
  EncodeFixed64(buf, file_number);
  Slice key(buf, sizeof(buf));
  *handle = cache_->Lookup(key);
  if (*handle == nullptr) {
    std::string fname = TableFileName(dbname_, file_number);
    RandomAccessFile* file = nullptr;
    Table* table = nullptr;
    s = env_->NewRandomAccessFile(fname, &file);
    if (!s.ok()) {
      std::string old_fname = SSTTableFileName(dbname_, file_number);
      if (env_->NewRandomAccessFile(old_fname, &file).ok()) {
        s = Status::OK();
      }
    }
    if (s.ok()) {
      s = Table::Open(options_, file, file_size, &table);
    }

    if (!s.ok()) {
      assert(table == nullptr);
      delete file;
      // We do not cache error results so that if the error is transient,
      // or somebody repairs the file, we recover automatically.
    } else {
      TableAndFile* tf = new TableAndFile;
      tf->file = file;
      tf->table = table;
      *handle = cache_->Insert(key, tf, 1, &DeleteEntry);
    }
  }
  return s;
}

Iterator* TableCache::NewIterator(const ReadOptions& options,
                                  uint64_t file_number, uint64_t file_size,
                                  Table** tableptr) {
  if (tableptr != nullptr) {
    *tableptr = nullptr;
  }

  Cache::Handle* handle = nullptr;
  Status s = FindTable(file_number, file_size, &handle);
  if (!s.ok()) {
    return NewErrorIterator(s);
  }

  Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
  Iterator* result = table->NewIterator(options);
  result->RegisterCleanup(&UnrefEntry, cache_, handle);
  if (tableptr != nullptr) {
    *tableptr = table;
  }
  return result;
}

Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
                       uint64_t file_size, const Slice& k, void* arg,
                       void (*handle_result)(void*, const Slice&,
                                             const Slice&)) {
  Cache::Handle* handle = nullptr;
  Status s = FindTable(file_number, file_size, &handle);
  if (s.ok()) {
    Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
    s = t->InternalGet(options, k, arg, handle_result);
    cache_->Release(handle);
  }
  return s;
}

void TableCache::Evict(uint64_t file_number) {
  char buf[sizeof(file_number)];
  EncodeFixed64(buf, file_number);
  cache_->Erase(Slice(buf, sizeof(buf)));
}

}  // namespace leveldb
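A hypothetical driver fragment for the cache above. TableCache is internal and is normally constructed by DBImpl; the dbname, file number, and size here are illustrative, and file 5 is assumed to name a real, readable table (the tableptr default of nullptr is assumed from db/table_cache.h, which is not part of this dump):

// Sketch: iterate one table through the cache, then evict its entry.
leveldb::Options options;
leveldb::TableCache cache("/tmp/exampledb", options, /*entries=*/100);
leveldb::Iterator* it =
    cache.NewIterator(leveldb::ReadOptions(), /*file_number=*/5,
                      /*file_size=*/4096);
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  // it->key() / it->value() are raw table entries (internal keys).
}
delete it;       // RegisterCleanup() releases the cache handle here
cache.Evict(5);  // the cache key is just the fixed64-encoded file number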
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/log_reader.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/log_reader.h"

#include <stdio.h>

#include "leveldb/env.h"
#include "util/coding.h"
#include "util/crc32c.h"

namespace leveldb {
namespace log {

Reader::Reporter::~Reporter() = default;

Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
               uint64_t initial_offset)
    : file_(file),
      reporter_(reporter),
      checksum_(checksum),
      backing_store_(new char[kBlockSize]),
      buffer_(),
      eof_(false),
      last_record_offset_(0),
      end_of_buffer_offset_(0),
      initial_offset_(initial_offset),
      resyncing_(initial_offset > 0) {}

Reader::~Reader() { delete[] backing_store_; }

bool Reader::SkipToInitialBlock() {
  const size_t offset_in_block = initial_offset_ % kBlockSize;
  uint64_t block_start_location = initial_offset_ - offset_in_block;

  // Don't search a block if we'd be in the trailer
  if (offset_in_block > kBlockSize - 6) {
    block_start_location += kBlockSize;
  }

  end_of_buffer_offset_ = block_start_location;

  // Skip to start of first block that can contain the initial record
  if (block_start_location > 0) {
    Status skip_status = file_->Skip(block_start_location);
    if (!skip_status.ok()) {
      ReportDrop(block_start_location, skip_status);
      return false;
    }
  }

  return true;
}

bool Reader::ReadRecord(Slice* record, std::string* scratch) {
  if (last_record_offset_ < initial_offset_) {
    if (!SkipToInitialBlock()) {
      return false;
    }
  }

  scratch->clear();
  record->clear();
  bool in_fragmented_record = false;
  // Record offset of the logical record that we're reading
  // 0 is a dummy value to make compilers happy
  uint64_t prospective_record_offset = 0;

  Slice fragment;
  while (true) {
    const unsigned int record_type = ReadPhysicalRecord(&fragment);

    // ReadPhysicalRecord may have only had an empty trailer remaining in its
    // internal buffer. Calculate the offset of the next physical record now
    // that it has returned, properly accounting for its header size.
    uint64_t physical_record_offset =
        end_of_buffer_offset_ - buffer_.size() - kHeaderSize - fragment.size();

    if (resyncing_) {
      if (record_type == kMiddleType) {
        continue;
      } else if (record_type == kLastType) {
        resyncing_ = false;
        continue;
      } else {
        resyncing_ = false;
      }
    }

    switch (record_type) {
      case kFullType:
        if (in_fragmented_record) {
          // Handle bug in earlier versions of log::Writer where
          // it could emit an empty kFirstType record at the tail end
          // of a block followed by a kFullType or kFirstType record
          // at the beginning of the next block.
          if (!scratch->empty()) {
            ReportCorruption(scratch->size(), "partial record without end(1)");
          }
        }
        prospective_record_offset = physical_record_offset;
        scratch->clear();
        *record = fragment;
        last_record_offset_ = prospective_record_offset;
        return true;

      case kFirstType:
        if (in_fragmented_record) {
          // Handle bug in earlier versions of log::Writer where
          // it could emit an empty kFirstType record at the tail end
          // of a block followed by a kFullType or kFirstType record
          // at the beginning of the next block.
          if (!scratch->empty()) {
            ReportCorruption(scratch->size(), "partial record without end(2)");
          }
        }
        prospective_record_offset = physical_record_offset;
        scratch->assign(fragment.data(), fragment.size());
        in_fragmented_record = true;
        break;

      case kMiddleType:
        if (!in_fragmented_record) {
          ReportCorruption(fragment.size(),
                           "missing start of fragmented record(1)");
        } else {
          scratch->append(fragment.data(), fragment.size());
        }
        break;

      case kLastType:
        if (!in_fragmented_record) {
          ReportCorruption(fragment.size(),
                           "missing start of fragmented record(2)");
        } else {
          scratch->append(fragment.data(), fragment.size());
          *record = Slice(*scratch);
          last_record_offset_ = prospective_record_offset;
          return true;
        }
        break;

      case kEof:
        if (in_fragmented_record) {
          // This can be caused by the writer dying immediately after
          // writing a physical record but before completing the next; don't
          // treat it as a corruption, just ignore the entire logical record.
          scratch->clear();
        }
        return false;

      case kBadRecord:
        if (in_fragmented_record) {
          ReportCorruption(scratch->size(), "error in middle of record");
          in_fragmented_record = false;
          scratch->clear();
        }
        break;

      default: {
        char buf[40];
        snprintf(buf, sizeof(buf), "unknown record type %u", record_type);
        ReportCorruption(
            (fragment.size() + (in_fragmented_record ? scratch->size() : 0)),
            buf);
        in_fragmented_record = false;
        scratch->clear();
        break;
      }
    }
  }
  return false;
}

uint64_t Reader::LastRecordOffset() { return last_record_offset_; }

void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
  ReportDrop(bytes, Status::Corruption(reason, file_->GetName()));
}

void Reader::ReportDrop(uint64_t bytes, const Status& reason) {
  if (reporter_ != nullptr &&
      end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) {
    reporter_->Corruption(static_cast<size_t>(bytes), reason);
  }
}

unsigned int Reader::ReadPhysicalRecord(Slice* result) {
  while (true) {
    if (buffer_.size() < kHeaderSize) {
      if (!eof_) {
        // Last read was a full read, so this is a trailer to skip
        buffer_.clear();
        Status status = file_->Read(kBlockSize, &buffer_, backing_store_);
        end_of_buffer_offset_ += buffer_.size();
        if (!status.ok()) {
          buffer_.clear();
          ReportDrop(kBlockSize, status);
          eof_ = true;
          return kEof;
        } else if (buffer_.size() < kBlockSize) {
          eof_ = true;
        }
        continue;
      } else {
        // Note that if buffer_ is non-empty, we have a truncated header at the
        // end of the file, which can be caused by the writer crashing in the
        // middle of writing the header. Instead of considering this an error,
        // just report EOF.
        buffer_.clear();
        return kEof;
      }
    }

    // Parse the header
    const char* header = buffer_.data();
    const uint32_t a = static_cast<uint32_t>(header[4]) & 0xff;
    const uint32_t b = static_cast<uint32_t>(header[5]) & 0xff;
    const unsigned int type = header[6];
    const uint32_t length = a | (b << 8);
    if (kHeaderSize + length > buffer_.size()) {
      size_t drop_size = buffer_.size();
      buffer_.clear();
      if (!eof_) {
        ReportCorruption(drop_size, "bad record length");
        return kBadRecord;
      }
      // If the end of the file has been reached without reading |length|
      // bytes of payload, assume the writer died in the middle of writing the
      // record. Don't report a corruption.
      return kEof;
    }

    if (type == kZeroType && length == 0) {
      // Skip zero length record without reporting any drops since
      // such records are produced by the mmap based writing code in
      // env_posix.cc that preallocates file regions.
      buffer_.clear();
      return kBadRecord;
    }

    // Check crc
    if (checksum_) {
      uint32_t expected_crc = crc32c::Unmask(DecodeFixed32(header));
      uint32_t actual_crc = crc32c::Value(header + 6, 1 + length);
      if (actual_crc != expected_crc) {
        // Drop the rest of the buffer since "length" itself may have
        // been corrupted and if we trust it, we could find some
        // fragment of a real log record that just happens to look
        // like a valid log record.
        size_t drop_size = buffer_.size();
        buffer_.clear();
        ReportCorruption(drop_size, "checksum mismatch");
        return kBadRecord;
      }
    }

    buffer_.remove_prefix(kHeaderSize + length);

    // Skip physical record that started before initial_offset_
    if (end_of_buffer_offset_ - buffer_.size() - kHeaderSize - length <
        initial_offset_) {
      result->clear();
      return kBadRecord;
    }

    *result = Slice(header + kHeaderSize, length);
    return type;
  }
}

}  // namespace log
}  // namespace leveldb
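A sketch that drains a log file with the Reader above. The path is hypothetical, and passing a null Reporter means drops and corruptions are silently ignored in this example:

// Sketch: read every logical record out of one log file.
#include <cstdio>
#include <string>
#include "db/log_reader.h"
#include "leveldb/env.h"

int main() {
  leveldb::Env* env = leveldb::Env::Default();
  leveldb::SequentialFile* file;
  if (!env->NewSequentialFile("/tmp/exampledb/000003.log", &file).ok()) {
    return 1;
  }
  leveldb::log::Reader reader(file, /*reporter=*/nullptr, /*checksum=*/true,
                              /*initial_offset=*/0);
  std::string scratch;
  leveldb::Slice record;
  int n = 0;
  while (reader.ReadRecord(&record, &scratch)) {
    n++;  // when reading a DB log, each record is one WriteBatch payload
  }
  std::printf("%d records\n", n);
  delete file;
  return 0;
}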
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/write_batch.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// WriteBatch::rep_ :=
//    sequence: fixed64
//    count: fixed32
//    data: record[count]
// record :=
//    kTypeValue varstring varstring         |
//    kTypeDeletion varstring
// varstring :=
//    len: varint32
//    data: uint8[len]

#include "leveldb/write_batch.h"

#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "util/coding.h"

namespace leveldb {

// WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
static const size_t kHeader = 12;

WriteBatch::WriteBatch() { Clear(); }

WriteBatch::~WriteBatch() = default;

WriteBatch::Handler::~Handler() = default;

void WriteBatch::Clear() {
  rep_.clear();
  rep_.resize(kHeader);
}

size_t WriteBatch::ApproximateSize() const { return rep_.size(); }

Status WriteBatch::Iterate(Handler* handler) const {
  Slice input(rep_);
  if (input.size() < kHeader) {
    return Status::Corruption("malformed WriteBatch (too small)");
  }

  input.remove_prefix(kHeader);
  Slice key, value;
  int found = 0;
  while (!input.empty()) {
    found++;
    char tag = input[0];
    input.remove_prefix(1);
    switch (tag) {
      case kTypeValue:
        if (GetLengthPrefixedSlice(&input, &key) &&
            GetLengthPrefixedSlice(&input, &value)) {
          handler->Put(key, value);
        } else {
          return Status::Corruption("bad WriteBatch Put");
        }
        break;
      case kTypeDeletion:
        if (GetLengthPrefixedSlice(&input, &key)) {
          handler->Delete(key);
        } else {
          return Status::Corruption("bad WriteBatch Delete");
        }
        break;
      default:
        return Status::Corruption("unknown WriteBatch tag");
    }
  }
  if (found != WriteBatchInternal::Count(this)) {
    return Status::Corruption("WriteBatch has wrong count");
  } else {
    return Status::OK();
  }
}

int WriteBatchInternal::Count(const WriteBatch* b) {
  return DecodeFixed32(b->rep_.data() + 8);
}

void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
  EncodeFixed32(&b->rep_[8], n);
}

SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
  return SequenceNumber(DecodeFixed64(b->rep_.data()));
}

void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
  EncodeFixed64(&b->rep_[0], seq);
}

void WriteBatch::Put(const Slice& key, const Slice& value) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeValue));
  PutLengthPrefixedSlice(&rep_, key);
  PutLengthPrefixedSlice(&rep_, value);
}

void WriteBatch::Delete(const Slice& key) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeDeletion));
  PutLengthPrefixedSlice(&rep_, key);
}

void WriteBatch::Append(const WriteBatch& source) {
  WriteBatchInternal::Append(this, &source);
}

namespace {
class MemTableInserter : public WriteBatch::Handler {
 public:
  SequenceNumber sequence_;
  MemTable* mem_;

  void Put(const Slice& key, const Slice& value) override {
    mem_->Add(sequence_, kTypeValue, key, value);
    sequence_++;
  }
  void Delete(const Slice& key) override {
    mem_->Add(sequence_, kTypeDeletion, key, Slice());
    sequence_++;
  }
};
}  // namespace

Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
  MemTableInserter inserter;
  inserter.sequence_ = WriteBatchInternal::Sequence(b);
  inserter.mem_ = memtable;
  return b->Iterate(&inserter);
}

void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
  assert(contents.size() >= kHeader);
  b->rep_.assign(contents.data(), contents.size());
}

void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
  SetCount(dst, Count(dst) + Count(src));
  assert(src->rep_.size() >= kHeader);
  dst->rep_.append(src->rep_.data() + kHeader, src->rep_.size() - kHeader);
}

}  // namespace leveldb
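A sketch that observes the rep_ layout documented at the top of this file through the public Handler interface; the counting handler is hypothetical, not from the tree:

// Sketch: replay a batch through a Handler, like InsertInto() does.
#include <cassert>
#include "leveldb/write_batch.h"

namespace {
// Hypothetical handler that just counts operations during Iterate().
class CountingHandler : public leveldb::WriteBatch::Handler {
 public:
  int puts = 0, deletes = 0;
  void Put(const leveldb::Slice& key, const leveldb::Slice& value) override {
    puts++;
  }
  void Delete(const leveldb::Slice& key) override { deletes++; }
};
}  // namespace

int main() {
  leveldb::WriteBatch batch;
  batch.Put("k1", "v1");
  batch.Put("k2", "v2");
  batch.Delete("k1");
  // ApproximateSize() includes kHeader (12): 8-byte sequence + 4-byte count.
  assert(batch.ApproximateSize() > 12);
  CountingHandler handler;
  assert(batch.Iterate(&handler).ok());
  assert(handler.puts == 2 && handler.deletes == 1);
  return 0;
}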
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/c.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "leveldb/c.h"

#include <cstdint>
#include <cstdlib>

#include "leveldb/cache.h"
#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
#include "leveldb/status.h"
#include "leveldb/write_batch.h"

using leveldb::Cache;
using leveldb::Comparator;
using leveldb::CompressionType;
using leveldb::DB;
using leveldb::Env;
using leveldb::FileLock;
using leveldb::FilterPolicy;
using leveldb::Iterator;
using leveldb::kMajorVersion;
using leveldb::kMinorVersion;
using leveldb::Logger;
using leveldb::NewBloomFilterPolicy;
using leveldb::NewLRUCache;
using leveldb::Options;
using leveldb::RandomAccessFile;
using leveldb::Range;
using leveldb::ReadOptions;
using leveldb::SequentialFile;
using leveldb::Slice;
using leveldb::Snapshot;
using leveldb::Status;
using leveldb::WritableFile;
using leveldb::WriteBatch;
using leveldb::WriteOptions;

extern "C" {

struct leveldb_t {
  DB* rep;
};
struct leveldb_iterator_t {
  Iterator* rep;
};
struct leveldb_writebatch_t {
  WriteBatch rep;
};
struct leveldb_snapshot_t {
  const Snapshot* rep;
};
struct leveldb_readoptions_t {
  ReadOptions rep;
};
struct leveldb_writeoptions_t {
  WriteOptions rep;
};
struct leveldb_options_t {
  Options rep;
};
struct leveldb_cache_t {
  Cache* rep;
};
struct leveldb_seqfile_t {
  SequentialFile* rep;
};
struct leveldb_randomfile_t {
  RandomAccessFile* rep;
};
struct leveldb_writablefile_t {
  WritableFile* rep;
};
struct leveldb_logger_t {
  Logger* rep;
};
struct leveldb_filelock_t {
  FileLock* rep;
};

struct leveldb_comparator_t : public Comparator {
  ~leveldb_comparator_t() override { (*destructor_)(state_); }

  int Compare(const Slice& a, const Slice& b) const override {
    return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
  }

  const char* Name() const override { return (*name_)(state_); }

  // No-ops since the C binding does not support key shortening methods.
  void FindShortestSeparator(std::string*, const Slice&) const override {}
  void FindShortSuccessor(std::string* key) const override {}

  void* state_;
  void (*destructor_)(void*);
  int (*compare_)(void*, const char* a, size_t alen, const char* b,
                  size_t blen);
  const char* (*name_)(void*);
};

struct leveldb_filterpolicy_t : public FilterPolicy {
  ~leveldb_filterpolicy_t() override { (*destructor_)(state_); }

  const char* Name() const override { return (*name_)(state_); }

  void CreateFilter(const Slice* keys, int n,
                    std::string* dst) const override {
    std::vector<const char*> key_pointers(n);
    std::vector<size_t> key_sizes(n);
    for (int i = 0; i < n; i++) {
      key_pointers[i] = keys[i].data();
      key_sizes[i] = keys[i].size();
    }
    size_t len;
    char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
    dst->append(filter, len);
    free(filter);
  }

  bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
    return (*key_match_)(state_, key.data(), key.size(), filter.data(),
                         filter.size());
  }

  void* state_;
  void (*destructor_)(void*);
  const char* (*name_)(void*);
  char* (*create_)(void*, const char* const* key_array,
                   const size_t* key_length_array, int num_keys,
                   size_t* filter_length);
  uint8_t (*key_match_)(void*, const char* key, size_t length,
                        const char* filter, size_t filter_length);
};

struct leveldb_env_t {
  Env* rep;
  bool is_default;
};

static bool SaveError(char** errptr, const Status& s) {
  assert(errptr != nullptr);
  if (s.ok()) {
    return false;
  } else if (*errptr == nullptr) {
    *errptr = strdup(s.ToString().c_str());
  } else {
    // TODO(sanjay): Merge with existing error?
    free(*errptr);
    *errptr = strdup(s.ToString().c_str());
  }
  return true;
}

static char* CopyString(const std::string& str) {
  char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size()));
  memcpy(result, str.data(), sizeof(char) * str.size());
  return result;
}

leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
                        char** errptr) {
  DB* db;
  if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
    return nullptr;
  }
  leveldb_t* result = new leveldb_t;
  result->rep = db;
  return result;
}

void leveldb_close(leveldb_t* db) {
  delete db->rep;
  delete db;
}

void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
                 const char* key, size_t keylen, const char* val,
                 size_t vallen, char** errptr) {
  SaveError(errptr,
            db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
}

void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
                    const char* key, size_t keylen, char** errptr) {
  SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
}

void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
                   leveldb_writebatch_t* batch, char** errptr) {
  SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
}

char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
                  const char* key, size_t keylen, size_t* vallen,
                  char** errptr) {
  char* result = nullptr;
  std::string tmp;
  Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
  if (s.ok()) {
    *vallen = tmp.size();
    result = CopyString(tmp);
  } else {
    *vallen = 0;
    if (!s.IsNotFound()) {
      SaveError(errptr, s);
    }
  }
  return result;
}

leveldb_iterator_t* leveldb_create_iterator(
    leveldb_t* db, const leveldb_readoptions_t* options) {
  leveldb_iterator_t* result = new leveldb_iterator_t;
  result->rep = db->rep->NewIterator(options->rep);
  return result;
}

const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
  leveldb_snapshot_t* result = new leveldb_snapshot_t;
  result->rep = db->rep->GetSnapshot();
  return result;
}

void leveldb_release_snapshot(leveldb_t* db,
                              const leveldb_snapshot_t* snapshot) {
  db->rep->ReleaseSnapshot(snapshot->rep);
  delete snapshot;
}

char* leveldb_property_value(leveldb_t* db, const char* propname) {
  std::string tmp;
  if (db->rep->GetProperty(Slice(propname), &tmp)) {
    // We use strdup() since we expect human readable output.
    return strdup(tmp.c_str());
  } else {
    return nullptr;
  }
}

void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
                               const char* const* range_start_key,
                               const size_t* range_start_key_len,
                               const char* const* range_limit_key,
                               const size_t* range_limit_key_len,
                               uint64_t* sizes) {
  Range* ranges = new Range[num_ranges];
  for (int i = 0; i < num_ranges; i++) {
    ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
    ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]);
  }
  db->rep->GetApproximateSizes(ranges, num_ranges, sizes);
  delete[] ranges;
}

void leveldb_compact_range(leveldb_t* db, const char* start_key,
                           size_t start_key_len, const char* limit_key,
                           size_t limit_key_len) {
  Slice a, b;
  db->rep->CompactRange(
      // Pass null Slice if corresponding "const char*" is null
      (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
      (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
}

void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
                        char** errptr) {
  SaveError(errptr, DestroyDB(name, options->rep));
}

void leveldb_repair_db(const leveldb_options_t* options, const char* name,
                       char** errptr) {
  SaveError(errptr, RepairDB(name, options->rep));
}

void leveldb_iter_destroy(leveldb_iterator_t* iter) {
  delete iter->rep;
  delete iter;
}

uint8_t leveldb_iter_valid(const leveldb_iterator_t* iter) {
  return iter->rep->Valid();
}

void leveldb_iter_seek_to_first(leveldb_iterator_t* iter) {
  iter->rep->SeekToFirst();
}

void leveldb_iter_seek_to_last(leveldb_iterator_t* iter) {
  iter->rep->SeekToLast();
}

void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
  iter->rep->Seek(Slice(k, klen));
}

void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }

void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }

const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
  Slice s = iter->rep->key();
  *klen = s.size();
  return s.data();
}

const char* leveldb_iter_value(const leveldb_iterator_t* iter, size_t* vlen) {
  Slice s = iter->rep->value();
  *vlen = s.size();
  return s.data();
}

void leveldb_iter_get_error(const leveldb_iterator_t* iter, char** errptr) {
  SaveError(errptr, iter->rep->status());
}

leveldb_writebatch_t* leveldb_writebatch_create() {
  return new leveldb_writebatch_t;
}

void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }

void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }

void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
                            size_t klen, const char* val, size_t vlen) {
  b->rep.Put(Slice(key, klen), Slice(val, vlen));
}

void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
                               size_t klen) {
  b->rep.Delete(Slice(key, klen));
}

void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
                                void (*put)(void*, const char* k, size_t klen,
                                            const char* v, size_t vlen),
                                void (*deleted)(void*, const char* k,
                                                size_t klen)) {
  class H : public WriteBatch::Handler {
   public:
    void* state_;
    void (*put_)(void*, const char* k, size_t klen, const char* v,
                 size_t vlen);
    void (*deleted_)(void*, const char* k, size_t klen);
    void Put(const Slice& key, const Slice& value) override {
      (*put_)(state_, key.data(), key.size(), value.data(), value.size());
    }
    void Delete(const Slice& key) override {
      (*deleted_)(state_, key.data(), key.size());
    }
  };
  H handler;
  handler.state_ = state;
  handler.put_ = put;
  handler.deleted_ = deleted;
  b->rep.Iterate(&handler);
}

void leveldb_writebatch_append(leveldb_writebatch_t* destination,
                               const leveldb_writebatch_t* source) {
  destination->rep.Append(source->rep);
}

leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }

void leveldb_options_destroy(leveldb_options_t* options) { delete options; }

void leveldb_options_set_comparator(leveldb_options_t* opt,
                                    leveldb_comparator_t* cmp) {
  opt->rep.comparator = cmp;
}

void leveldb_options_set_filter_policy(leveldb_options_t* opt,
                                       leveldb_filterpolicy_t* policy) {
  opt->rep.filter_policy = policy;
}

void leveldb_options_set_create_if_missing(leveldb_options_t* opt, uint8_t v) {
  opt->rep.create_if_missing = v;
}

void leveldb_options_set_error_if_exists(leveldb_options_t* opt, uint8_t v) {
  opt->rep.error_if_exists = v;
}

void leveldb_options_set_paranoid_checks(leveldb_options_t* opt, uint8_t v) {
  opt->rep.paranoid_checks = v;
}

void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
  opt->rep.env = (env ? env->rep : nullptr);
}

void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
  opt->rep.info_log = (l ? l->rep : nullptr);
}

void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
  opt->rep.write_buffer_size = s;
}

void leveldb_options_set_max_open_files(leveldb_options_t* opt, int n) {
  opt->rep.max_open_files = n;
}

void leveldb_options_set_cache(leveldb_options_t* opt, leveldb_cache_t* c) {
  opt->rep.block_cache = c->rep;
}

void leveldb_options_set_block_size(leveldb_options_t* opt, size_t s) {
  opt->rep.block_size = s;
}

void leveldb_options_set_block_restart_interval(leveldb_options_t* opt,
                                                int n) {
  opt->rep.block_restart_interval = n;
}

void leveldb_options_set_max_file_size(leveldb_options_t* opt, size_t s) {
  opt->rep.max_file_size = s;
}

void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
  opt->rep.compression = static_cast<CompressionType>(t);
}

leveldb_comparator_t* leveldb_comparator_create(
    void* state, void (*destructor)(void*),
    int (*compare)(void*, const char* a, size_t alen, const char* b,
                   size_t blen),
    const char* (*name)(void*)) {
  leveldb_comparator_t* result = new leveldb_comparator_t;
  result->state_ = state;
  result->destructor_ = destructor;
  result->compare_ = compare;
  result->name_ = name;
  return result;
}

void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }

leveldb_filterpolicy_t* leveldb_filterpolicy_create(
    void* state, void (*destructor)(void*),
    char* (*create_filter)(void*, const char* const* key_array,
                           const size_t* key_length_array, int num_keys,
                           size_t* filter_length),
    uint8_t (*key_may_match)(void*, const char* key, size_t length,
                             const char* filter, size_t filter_length),
    const char* (*name)(void*)) {
  leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
  result->state_ = state;
  result->destructor_ = destructor;
  result->create_ = create_filter;
  result->key_match_ = key_may_match;
  result->name_ = name;
  return result;
}

void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t* filter) {
  delete filter;
}

leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
  // Make a leveldb_filterpolicy_t, but override all of its methods so
  // they delegate to a NewBloomFilterPolicy() instead of user
  // supplied C functions.
  struct Wrapper : public leveldb_filterpolicy_t {
    static void DoNothing(void*) {}

    ~Wrapper() { delete rep_; }
    const char* Name() const { return rep_->Name(); }
    void CreateFilter(const Slice* keys, int n, std::string* dst) const {
      return rep_->CreateFilter(keys, n, dst);
    }
    bool KeyMayMatch(const Slice& key, const Slice& filter) const {
      return rep_->KeyMayMatch(key, filter);
    }

    const FilterPolicy* rep_;
  };
  Wrapper* wrapper = new Wrapper;
  wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
  wrapper->state_ = nullptr;
  wrapper->destructor_ = &Wrapper::DoNothing;
  return wrapper;
}

leveldb_readoptions_t* leveldb_readoptions_create() {
  return new leveldb_readoptions_t;
}

void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }

void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
                                              uint8_t v) {
  opt->rep.verify_checksums = v;
}

void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt,
                                        uint8_t v) {
  opt->rep.fill_cache = v;
}

void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
                                      const leveldb_snapshot_t* snap) {
  opt->rep.snapshot = (snap ? snap->rep : nullptr);
}

leveldb_writeoptions_t* leveldb_writeoptions_create() {
  return new leveldb_writeoptions_t;
}

void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }

void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt, uint8_t v) {
  opt->rep.sync = v;
}

leveldb_cache_t* leveldb_cache_create_lru(size_t capacity) {
  leveldb_cache_t* c = new leveldb_cache_t;
  c->rep = NewLRUCache(capacity);
  return c;
}

void leveldb_cache_destroy(leveldb_cache_t* cache) {
  delete cache->rep;
  delete cache;
}

leveldb_env_t* leveldb_create_default_env() {
  leveldb_env_t* result = new leveldb_env_t;
  result->rep = Env::Default();
  result->is_default = true;
  return result;
}

void leveldb_env_destroy(leveldb_env_t* env) {
  if (!env->is_default) delete env->rep;
  delete env;
}

char* leveldb_env_get_test_directory(leveldb_env_t* env) {
  std::string result;
  if (!env->rep->GetTestDirectory(&result).ok()) {
    return nullptr;
  }

  char* buffer = static_cast<char*>(malloc(result.size() + 1));
  memcpy(buffer, result.data(), result.size());
  buffer[result.size()] = '\0';
  return buffer;
}

void leveldb_free(void* ptr) { free(ptr); }

int leveldb_major_version() { return kMajorVersion; }

int leveldb_minor_version() { return kMinorVersion; }

}  // end extern "C"
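A minimal round trip through the C binding above; the database path is arbitrary and error handling for the put is elided for brevity. Note that leveldb_get() returns a malloc'd, non-NUL-terminated buffer, so it is printed with an explicit length and released with leveldb_free():

/* Sketch: open, put, get, close via the C API. */
#include <stdio.h>
#include "leveldb/c.h"

int main(void) {
  char* err = NULL;
  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_create_if_missing(options, 1);
  leveldb_t* db = leveldb_open(options, "/tmp/exampledb", &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    leveldb_free(err);
    return 1;
  }

  leveldb_writeoptions_t* woptions = leveldb_writeoptions_create();
  leveldb_put(db, woptions, "key", 3, "value", 5, &err);

  leveldb_readoptions_t* roptions = leveldb_readoptions_create();
  size_t vallen = 0;
  char* val = leveldb_get(db, roptions, "key", 3, &vallen, &err);
  if (val != NULL) {
    printf("%.*s\n", (int)vallen, val); /* buffer is not NUL-terminated */
    leveldb_free(val);
  }

  leveldb_writeoptions_destroy(woptions);
  leveldb_readoptions_destroy(roptions);
  leveldb_options_destroy(options);
  leveldb_close(db);
  return 0;
}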
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/recovery_test.cc
// Copyright (c) 2014 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/write_batch.h"
#include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace leveldb {

class RecoveryTest {
 public:
  RecoveryTest() : env_(Env::Default()), db_(nullptr) {
    dbname_ = test::TmpDir() + "/recovery_test";
    DestroyDB(dbname_, Options());
    Open();
  }

  ~RecoveryTest() {
    Close();
    DestroyDB(dbname_, Options());
  }

  DBImpl* dbfull() const { return reinterpret_cast<DBImpl*>(db_); }
  Env* env() const { return env_; }

  bool CanAppend() {
    WritableFile* tmp;
    Status s = env_->NewAppendableFile(CurrentFileName(dbname_), &tmp);
    delete tmp;
    if (s.IsNotSupportedError()) {
      return false;
    } else {
      return true;
    }
  }

  void Close() {
    delete db_;
    db_ = nullptr;
  }

  Status OpenWithStatus(Options* options = nullptr) {
    Close();
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts.reuse_logs = true;  // TODO(sanjay): test both ways
      opts.create_if_missing = true;
    }
    if (opts.env == nullptr) {
      opts.env = env_;
    }
    return DB::Open(opts, dbname_, &db_);
  }

  void Open(Options* options = nullptr) {
    ASSERT_OK(OpenWithStatus(options));
    ASSERT_EQ(1, NumLogs());
  }

  Status Put(const std::string& k, const std::string& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    std::string result;
    Status s = db_->Get(ReadOptions(), k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  std::string ManifestFileName() {
    std::string current;
    ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
    size_t len = current.size();
    if (len > 0 && current[len - 1] == '\n') {
      current.resize(len - 1);
    }
    return dbname_ + "/" + current;
  }

  std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }

  size_t DeleteLogFiles() {
    // Linux allows unlinking open files, but Windows does not.
    // Closing the db allows for file deletion.
    Close();
    std::vector<uint64_t> logs = GetFiles(kLogFile);
    for (size_t i = 0; i < logs.size(); i++) {
      ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
    }
    return logs.size();
  }

  void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }

  uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }

  std::vector<uint64_t> GetFiles(FileType t) {
    std::vector<std::string> filenames;
    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
    std::vector<uint64_t> result;
    for (size_t i = 0; i < filenames.size(); i++) {
      uint64_t number;
      FileType type;
      if (ParseFileName(filenames[i], &number, &type) && type == t) {
        result.push_back(number);
      }
    }
    return result;
  }

  int NumLogs() { return GetFiles(kLogFile).size(); }
  int NumTables() { return GetFiles(kTableFile).size(); }

  uint64_t FileSize(const std::string& fname) {
    uint64_t result;
    ASSERT_OK(env_->GetFileSize(fname, &result)) << fname;
    return result;
  }

  void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }

  // Directly construct a log file that sets key to val.
  void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
    std::string fname = LogFileName(dbname_, lognum);
    WritableFile* file;
    ASSERT_OK(env_->NewWritableFile(fname, &file));
    log::Writer writer(file);
    WriteBatch batch;
    batch.Put(key, val);
    WriteBatchInternal::SetSequence(&batch, seq);
    ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
    ASSERT_OK(file->Flush());
    delete file;
  }

 private:
  std::string dbname_;
  Env* env_;
  DB* db_;
};

TEST(RecoveryTest, ManifestReused) {
  if (!CanAppend()) {
    fprintf(stderr, "skipping test because env does not support appending\n");
    return;
  }
  ASSERT_OK(Put("foo", "bar"));
  Close();
  std::string old_manifest = ManifestFileName();
  Open();
  ASSERT_EQ(old_manifest, ManifestFileName());
  ASSERT_EQ("bar", Get("foo"));
  Open();
  ASSERT_EQ(old_manifest, ManifestFileName());
  ASSERT_EQ("bar", Get("foo"));
}

TEST(RecoveryTest, LargeManifestCompacted) {
  if (!CanAppend()) {
    fprintf(stderr, "skipping test because env does not support appending\n");
    return;
  }
  ASSERT_OK(Put("foo", "bar"));
  Close();
  std::string old_manifest = ManifestFileName();

  // Pad with zeroes to make manifest file very big.
  {
    uint64_t len = FileSize(old_manifest);
    WritableFile* file;
    ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
    std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
    ASSERT_OK(file->Append(zeroes));
    ASSERT_OK(file->Flush());
    delete file;
  }

  Open();
  std::string new_manifest = ManifestFileName();
  ASSERT_NE(old_manifest, new_manifest);
  ASSERT_GT(10000, FileSize(new_manifest));
  ASSERT_EQ("bar", Get("foo"));

  Open();
  ASSERT_EQ(new_manifest, ManifestFileName());
  ASSERT_EQ("bar", Get("foo"));
}

TEST(RecoveryTest, NoLogFiles) {
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_EQ(1, DeleteLogFiles());
  Open();
  ASSERT_EQ("NOT_FOUND", Get("foo"));
  Open();
  ASSERT_EQ("NOT_FOUND", Get("foo"));
}

TEST(RecoveryTest, LogFileReuse) {
  if (!CanAppend()) {
    fprintf(stderr, "skipping test because env does not support appending\n");
    return;
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_OK(Put("foo", "bar"));
    if (i == 0) {
      // Compact to ensure current log is empty
      CompactMemTable();
    }
    Close();
    ASSERT_EQ(1, NumLogs());
    uint64_t number = FirstLogFile();
    if (i == 0) {
      ASSERT_EQ(0, FileSize(LogName(number)));
    } else {
      ASSERT_LT(0, FileSize(LogName(number)));
    }
    Open();
    ASSERT_EQ(1, NumLogs());
    ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
    ASSERT_EQ("bar", Get("foo"));
    Open();
    ASSERT_EQ(1, NumLogs());
    ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
    ASSERT_EQ("bar", Get("foo"));
  }
}

TEST(RecoveryTest, MultipleMemTables) {
  // Make a large log.
  const int kNum = 1000;
  for (int i = 0; i < kNum; i++) {
    char buf[100];
    snprintf(buf, sizeof(buf), "%050d", i);
    ASSERT_OK(Put(buf, buf));
  }
  ASSERT_EQ(0, NumTables());
  Close();
  ASSERT_EQ(0, NumTables());
  ASSERT_EQ(1, NumLogs());
  uint64_t old_log_file = FirstLogFile();

  // Force creation of multiple memtables by reducing the write buffer size.
  Options opt;
  opt.reuse_logs = true;
  opt.write_buffer_size = (kNum * 100) / 2;
  Open(&opt);
  ASSERT_LE(2, NumTables());
  ASSERT_EQ(1, NumLogs());
  ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log";
  for (int i = 0; i < kNum; i++) {
    char buf[100];
    snprintf(buf, sizeof(buf), "%050d", i);
    ASSERT_EQ(buf, Get(buf));
  }
}

TEST(RecoveryTest, MultipleLogFiles) {
  ASSERT_OK(Put("foo", "bar"));
  Close();
  ASSERT_EQ(1, NumLogs());

  // Make a bunch of uncompacted log files.
  uint64_t old_log = FirstLogFile();
  MakeLogFile(old_log + 1, 1000, "hello", "world");
  MakeLogFile(old_log + 2, 1001, "hi", "there");
  MakeLogFile(old_log + 3, 1002, "foo", "bar2");

  // Recover and check that all log files were processed.
  Open();
  ASSERT_LE(1, NumTables());
  ASSERT_EQ(1, NumLogs());
  uint64_t new_log = FirstLogFile();
  ASSERT_LE(old_log + 3, new_log);
  ASSERT_EQ("bar2", Get("foo"));
  ASSERT_EQ("world", Get("hello"));
  ASSERT_EQ("there", Get("hi"));

  // Test that previous recovery produced recoverable state.
  Open();
  ASSERT_LE(1, NumTables());
  ASSERT_EQ(1, NumLogs());
  if (CanAppend()) {
    ASSERT_EQ(new_log, FirstLogFile());
  }
  ASSERT_EQ("bar2", Get("foo"));
  ASSERT_EQ("world", Get("hello"));
  ASSERT_EQ("there", Get("hi"));

  // Check that introducing an older log file does not cause it to be re-read.
  Close();
  MakeLogFile(old_log + 1, 2000, "hello", "stale write");
  Open();
  ASSERT_LE(1, NumTables());
  ASSERT_EQ(1, NumLogs());
  if (CanAppend()) {
    ASSERT_EQ(new_log, FirstLogFile());
  }
  ASSERT_EQ("bar2", Get("foo"));
  ASSERT_EQ("world", Get("hello"));
  ASSERT_EQ("there", Get("hi"));
}

TEST(RecoveryTest, ManifestMissing) {
  ASSERT_OK(Put("foo", "bar"));
  Close();
  DeleteManifestFile();

  Status status = OpenWithStatus();
  ASSERT_TRUE(status.IsCorruption());
}

}  // namespace leveldb

int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
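Each TEST above is registered through util/testharness.h, so a new case would follow the same shape. The body below is a hypothetical illustration, not a test from the tree:

// Sketch: shape of an additional case in this harness (hypothetical).
TEST(RecoveryTest, PutThenReopenKeepsValue) {
  ASSERT_OK(Put("k", "v"));
  Close();
  Open();  // recovery replays the log on reopen
  ASSERT_EQ("v", Get("k"));
}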
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/builder.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/builder.h"

#include "db/dbformat.h"
#include "db/filename.h"
#include "db/table_cache.h"
#include "db/version_edit.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"

namespace leveldb {

Status BuildTable(const std::string& dbname, Env* env, const Options& options,
                  TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
  Status s;
  meta->file_size = 0;
  iter->SeekToFirst();

  std::string fname = TableFileName(dbname, meta->number);
  if (iter->Valid()) {
    WritableFile* file;
    s = env->NewWritableFile(fname, &file);
    if (!s.ok()) {
      return s;
    }

    TableBuilder* builder = new TableBuilder(options, file);
    meta->smallest.DecodeFrom(iter->key());
    for (; iter->Valid(); iter->Next()) {
      Slice key = iter->key();
      meta->largest.DecodeFrom(key);
      builder->Add(key, iter->value());
    }

    // Finish and check for builder errors
    s = builder->Finish();
    if (s.ok()) {
      meta->file_size = builder->FileSize();
      assert(meta->file_size > 0);
    }
    delete builder;

    // Finish and check for file errors
    if (s.ok()) {
      s = file->Sync();
    }
    if (s.ok()) {
      s = file->Close();
    }
    delete file;
    file = nullptr;

    if (s.ok()) {
      // Verify that the table is usable
      Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
                                              meta->file_size);
      s = it->status();
      delete it;
    }
  }

  // Check for input iterator errors
  if (!iter->status().ok()) {
    s = iter->status();
  }

  if (s.ok() && meta->file_size > 0) {
    // Keep it
  } else {
    env->DeleteFile(fname);
  }
  return s;
}

}  // namespace leveldb
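A hypothetical flush-driver fragment for BuildTable above. It assumes mem is a live MemTable, cache is a TableCache for the same dbname, next_file_number is managed by the caller, and FileMetaData (with its number/file_size/smallest/largest fields) comes from db/version_edit.h, which is not shown in this dump:

// Sketch: flush a memtable to an sstable, as DBImpl::WriteLevel0Table does.
leveldb::FileMetaData meta;
meta.number = next_file_number++;  // caller picks a fresh file number
leveldb::Iterator* it = mem->NewIterator();
leveldb::Status s =
    leveldb::BuildTable("/tmp/exampledb", leveldb::Env::Default(),
                        leveldb::Options(), cache, it, &meta);
delete it;
// On success meta.file_size > 0 and meta.smallest/largest bound the keys;
// on failure (or empty input) the output file has already been deleted.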
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/log_writer.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_DB_LOG_WRITER_H_
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_

#include <stdint.h>

#include "db/log_format.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

namespace leveldb {

class WritableFile;

namespace log {

class Writer {
 public:
  // Create a writer that will append data to "*dest".
  // "*dest" must be initially empty.
  // "*dest" must remain live while this Writer is in use.
  explicit Writer(WritableFile* dest);

  // Create a writer that will append data to "*dest".
  // "*dest" must have initial length "dest_length".
  // "*dest" must remain live while this Writer is in use.
  Writer(WritableFile* dest, uint64_t dest_length);

  Writer(const Writer&) = delete;
  Writer& operator=(const Writer&) = delete;

  ~Writer();

  Status AddRecord(const Slice& slice);

 private:
  Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);

  WritableFile* dest_;
  int block_offset_;  // Current offset in block

  // crc32c values for all supported record types.  These are
  // pre-computed to reduce the overhead of computing the crc of the
  // record type stored in the header.
  uint32_t type_crc_[kMaxRecordType + 1];
};

}  // namespace log
}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_LOG_WRITER_H_
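A sketch pairing this Writer with a fresh file; the path is hypothetical, and the same pattern appears in MakeLogFile() in recovery_test.cc earlier in this dump:

// Sketch: append two records to a new, initially empty log file.
#include "db/log_writer.h"
#include "leveldb/env.h"

int main() {
  leveldb::Env* env = leveldb::Env::Default();
  leveldb::WritableFile* file;
  if (!env->NewWritableFile("/tmp/example.log", &file).ok()) return 1;
  leveldb::log::Writer writer(file);  // file must start empty, per contract
  writer.AddRecord(leveldb::Slice("record one"));
  writer.AddRecord(leveldb::Slice("record two"));
  file->Sync();
  delete file;
  return 0;
}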
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/version_edit.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_edit.h"

#include "db/version_set.h"
#include "util/coding.h"

namespace leveldb {

// Tag numbers for serialized VersionEdit.  These numbers are written to
// disk and should not be changed.
enum Tag {
  kComparator = 1,
  kLogNumber = 2,
  kNextFileNumber = 3,
  kLastSequence = 4,
  kCompactPointer = 5,
  kDeletedFile = 6,
  kNewFile = 7,
  // 8 was used for large value refs
  kPrevLogNumber = 9
};

void VersionEdit::Clear() {
  comparator_.clear();
  log_number_ = 0;
  prev_log_number_ = 0;
  last_sequence_ = 0;
  next_file_number_ = 0;
  has_comparator_ = false;
  has_log_number_ = false;
  has_prev_log_number_ = false;
  has_next_file_number_ = false;
  has_last_sequence_ = false;
  deleted_files_.clear();
  new_files_.clear();
}

void VersionEdit::EncodeTo(std::string* dst) const {
  if (has_comparator_) {
    PutVarint32(dst, kComparator);
    PutLengthPrefixedSlice(dst, comparator_);
  }
  if (has_log_number_) {
    PutVarint32(dst, kLogNumber);
    PutVarint64(dst, log_number_);
  }
  if (has_prev_log_number_) {
    PutVarint32(dst, kPrevLogNumber);
    PutVarint64(dst, prev_log_number_);
  }
  if (has_next_file_number_) {
    PutVarint32(dst, kNextFileNumber);
    PutVarint64(dst, next_file_number_);
  }
  if (has_last_sequence_) {
    PutVarint32(dst, kLastSequence);
    PutVarint64(dst, last_sequence_);
  }

  for (size_t i = 0; i < compact_pointers_.size(); i++) {
    PutVarint32(dst, kCompactPointer);
    PutVarint32(dst, compact_pointers_[i].first);  // level
    PutLengthPrefixedSlice(dst, compact_pointers_[i].second.Encode());
  }

  for (const auto& deleted_file_kvp : deleted_files_) {
    PutVarint32(dst, kDeletedFile);
    PutVarint32(dst, deleted_file_kvp.first);   // level
    PutVarint64(dst, deleted_file_kvp.second);  // file number
  }

  for (size_t i = 0; i < new_files_.size(); i++) {
    const FileMetaData& f = new_files_[i].second;
    PutVarint32(dst, kNewFile);
    PutVarint32(dst, new_files_[i].first);  // level
    PutVarint64(dst, f.number);
    PutVarint64(dst, f.file_size);
    PutLengthPrefixedSlice(dst, f.smallest.Encode());
    PutLengthPrefixedSlice(dst, f.largest.Encode());
  }
}

static bool GetInternalKey(Slice* input, InternalKey* dst) {
  Slice str;
  if (GetLengthPrefixedSlice(input, &str)) {
    return dst->DecodeFrom(str);
  } else {
    return false;
  }
}

static bool GetLevel(Slice* input, int* level) {
  uint32_t v;
  if (GetVarint32(input, &v) && v < config::kNumLevels) {
    *level = v;
    return true;
  } else {
    return false;
  }
}

Status VersionEdit::DecodeFrom(const Slice& src) {
  Clear();
  Slice input = src;
  const char* msg = nullptr;
  uint32_t tag;

  // Temporary storage for parsing
  int level;
  uint64_t number;
  FileMetaData f;
  Slice str;
  InternalKey key;

  while (msg == nullptr && GetVarint32(&input, &tag)) {
    switch (tag) {
      case kComparator:
        if (GetLengthPrefixedSlice(&input, &str)) {
          comparator_ = str.ToString();
          has_comparator_ = true;
        } else {
          msg = "comparator name";
        }
        break;

      case kLogNumber:
        if (GetVarint64(&input, &log_number_)) {
          has_log_number_ = true;
        } else {
          msg = "log number";
        }
        break;

      case kPrevLogNumber:
        if (GetVarint64(&input, &prev_log_number_)) {
          has_prev_log_number_ = true;
        } else {
          msg = "previous log number";
        }
        break;

      case kNextFileNumber:
        if (GetVarint64(&input, &next_file_number_)) {
          has_next_file_number_ = true;
        } else {
          msg = "next file number";
        }
        break;

      case kLastSequence:
        if (GetVarint64(&input, &last_sequence_)) {
          has_last_sequence_ = true;
        } else {
          msg = "last sequence number";
        }
        break;

      case kCompactPointer:
        if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
          compact_pointers_.push_back(std::make_pair(level, key));
        } else {
          msg = "compaction pointer";
        }
        break;

      case kDeletedFile:
        if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
          deleted_files_.insert(std::make_pair(level, number));
        } else {
          msg = "deleted file";
        }
        break;

      case kNewFile:
        if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
            GetVarint64(&input, &f.file_size) &&
            GetInternalKey(&input, &f.smallest) &&
            GetInternalKey(&input, &f.largest)) {
          new_files_.push_back(std::make_pair(level, f));
        } else {
          msg = "new-file entry";
        }
        break;

      default:
        msg = "unknown tag";
        break;
    }
  }

  if (msg == nullptr && !input.empty()) {
    msg = "invalid tag";
  }

  Status result;
  if (msg != nullptr) {
    result = Status::Corruption("VersionEdit", msg);
  }
  return result;
}

std::string VersionEdit::DebugString() const {
  std::string r;
  r.append("VersionEdit {");
  if (has_comparator_) {
    r.append("\n  Comparator: ");
    r.append(comparator_);
  }
  if (has_log_number_) {
    r.append("\n  LogNumber: ");
    AppendNumberTo(&r, log_number_);
  }
  if (has_prev_log_number_) {
    r.append("\n  PrevLogNumber: ");
    AppendNumberTo(&r, prev_log_number_);
  }
  if (has_next_file_number_) {
    r.append("\n  NextFile: ");
    AppendNumberTo(&r, next_file_number_);
  }
  if (has_last_sequence_) {
    r.append("\n  LastSeq: ");
    AppendNumberTo(&r, last_sequence_);
  }
  for (size_t i = 0; i < compact_pointers_.size(); i++) {
    r.append("\n  CompactPointer: ");
    AppendNumberTo(&r, compact_pointers_[i].first);
    r.append(" ");
    r.append(compact_pointers_[i].second.DebugString());
  }
  for (const auto& deleted_files_kvp : deleted_files_) {
    r.append("\n  DeleteFile: ");
    AppendNumberTo(&r, deleted_files_kvp.first);
    r.append(" ");
    AppendNumberTo(&r, deleted_files_kvp.second);
  }
  for (size_t i = 0; i < new_files_.size(); i++) {
    const FileMetaData& f = new_files_[i].second;
    r.append("\n  AddFile: ");
    AppendNumberTo(&r, new_files_[i].first);
    r.append(" ");
    AppendNumberTo(&r, f.number);
    r.append(" ");
    AppendNumberTo(&r, f.file_size);
    r.append(" ");
    r.append(f.smallest.DebugString());
    r.append(" .. ");
    r.append(f.largest.DebugString());
  }
  r.append("\n}\n");
  return r;
}

}  // namespace leveldb
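A round-trip sketch for the tag encoding above. The setters used here are declared in db/version_edit.h, which is not part of this dump, so their exact signatures are an assumption; DebugString() serves as a convenient equality proxy:

// Sketch: encode a VersionEdit and decode it back.
#include <cassert>
#include <string>
#include "db/version_edit.h"

int main() {
  leveldb::VersionEdit edit;
  edit.SetComparatorName("leveldb.BytewiseComparator");  // assumed setter
  edit.SetLogNumber(9);
  edit.SetNextFile(10);
  edit.SetLastSequence(1234);
  edit.AddFile(/*level=*/1, /*file=*/7, /*file_size=*/4096,
               leveldb::InternalKey("a", 100, leveldb::kTypeValue),
               leveldb::InternalKey("z", 200, leveldb::kTypeValue));
  edit.DeleteFile(/*level=*/1, /*file=*/3);

  std::string encoded;
  edit.EncodeTo(&encoded);

  leveldb::VersionEdit decoded;
  assert(decoded.DecodeFrom(encoded).ok());
  assert(decoded.DebugString() == edit.DebugString());
  return 0;
}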
bitcoin/src/leveldb/db/db_impl.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_ #define STORAGE_LEVELDB_DB_DB_IMPL_H_ #include <atomic> #include <deque> #include <set> #include <string> #include "db/dbformat.h" #include "db/log_writer.h" #include "db/snapshot.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "port/port.h" #include "port/thread_annotations.h" namespace leveldb { class MemTable; class TableCache; class Version; class VersionEdit; class VersionSet; class DBImpl : public DB { public: DBImpl(const Options& options, const std::string& dbname); DBImpl(const DBImpl&) = delete; DBImpl& operator=(const DBImpl&) = delete; ~DBImpl() override; // Implementations of the DB interface Status Put(const WriteOptions&, const Slice& key, const Slice& value) override; Status Delete(const WriteOptions&, const Slice& key) override; Status Write(const WriteOptions& options, WriteBatch* updates) override; Status Get(const ReadOptions& options, const Slice& key, std::string* value) override; Iterator* NewIterator(const ReadOptions&) override; const Snapshot* GetSnapshot() override; void ReleaseSnapshot(const Snapshot* snapshot) override; bool GetProperty(const Slice& property, std::string* value) override; void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override; void CompactRange(const Slice* begin, const Slice* end) override; // Extra methods (for testing) that are not in the public DB interface // Compact any files in the named level that overlap [*begin,*end] void TEST_CompactRange(int level, const Slice* begin, const Slice* end); // Force current memtable contents to be compacted. Status TEST_CompactMemTable(); // Return an internal iterator over the current state of the database. // The keys of this iterator are internal keys (see format.h). // The returned iterator should be deleted when no longer needed. Iterator* TEST_NewInternalIterator(); // Return the maximum overlapping data (in bytes) at next level for any // file at a level >= 1. int64_t TEST_MaxNextLevelOverlappingBytes(); // Record a sample of bytes read at the specified internal key. // Samples are taken approximately once every config::kReadBytesPeriod // bytes. void RecordReadSample(Slice key); private: friend class DB; struct CompactionState; struct Writer; // Information for a manual compaction struct ManualCompaction { int level; bool done; const InternalKey* begin; // null means beginning of key range const InternalKey* end; // null means end of key range InternalKey tmp_storage; // Used to keep track of compaction progress }; // Per level compaction stats. stats_[level] stores the stats for // compactions that produced data for the specified "level". struct CompactionStats { CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {} void Add(const CompactionStats& c) { this->micros += c.micros; this->bytes_read += c.bytes_read; this->bytes_written += c.bytes_written; } int64_t micros; int64_t bytes_read; int64_t bytes_written; }; Iterator* NewInternalIterator(const ReadOptions&, SequenceNumber* latest_snapshot, uint32_t* seed); Status NewDB(); // Recover the descriptor from persistent storage. May do a significant // amount of work to recover recently logged updates. Any changes to // be made to the descriptor are added to *edit. 
Status Recover(VersionEdit* edit, bool* save_manifest) EXCLUSIVE_LOCKS_REQUIRED(mutex_); void MaybeIgnoreError(Status* s) const; // Delete any unneeded files and stale in-memory entries. void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Compact the in-memory write buffer to disk. Switches to a new // log-file/memtable and writes a new descriptor iff successful. // Errors are recorded in bg_error_. void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest, VersionEdit* edit, SequenceNumber* max_sequence) EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base) EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status MakeRoomForWrite(bool force /* compact even if there is room? */) EXCLUSIVE_LOCKS_REQUIRED(mutex_); WriteBatch* BuildBatchGroup(Writer** last_writer) EXCLUSIVE_LOCKS_REQUIRED(mutex_); void RecordBackgroundError(const Status& s); void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); static void BGWork(void* db); void BackgroundCall(); void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_); void CleanupCompaction(CompactionState* compact) EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status DoCompactionWork(CompactionState* compact) EXCLUSIVE_LOCKS_REQUIRED(mutex_); Status OpenCompactionOutputFile(CompactionState* compact); Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input); Status InstallCompactionResults(CompactionState* compact) EXCLUSIVE_LOCKS_REQUIRED(mutex_); const Comparator* user_comparator() const { return internal_comparator_.user_comparator(); } // Constant after construction Env* const env_; const InternalKeyComparator internal_comparator_; const InternalFilterPolicy internal_filter_policy_; const Options options_; // options_.comparator == &internal_comparator_ const bool owns_info_log_; const bool owns_cache_; const std::string dbname_; // table_cache_ provides its own synchronization TableCache* const table_cache_; // Lock over the persistent DB state. Non-null iff successfully acquired. FileLock* db_lock_; // State below is protected by mutex_ port::Mutex mutex_; std::atomic<bool> shutting_down_; port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_); MemTable* mem_; MemTable* imm_ GUARDED_BY(mutex_); // Memtable being compacted std::atomic<bool> has_imm_; // So bg thread can detect non-null imm_ WritableFile* logfile_; uint64_t logfile_number_ GUARDED_BY(mutex_); log::Writer* log_; uint32_t seed_ GUARDED_BY(mutex_); // For sampling. // Queue of writers. std::deque<Writer*> writers_ GUARDED_BY(mutex_); WriteBatch* tmp_batch_ GUARDED_BY(mutex_); SnapshotList snapshots_ GUARDED_BY(mutex_); // Set of table files to protect from deletion because they are // part of ongoing compactions. std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_); // Has a background compaction been scheduled or is running? bool background_compaction_scheduled_ GUARDED_BY(mutex_); ManualCompaction* manual_compaction_ GUARDED_BY(mutex_); VersionSet* const versions_ GUARDED_BY(mutex_); // Have we encountered a background error in paranoid mode? Status bg_error_ GUARDED_BY(mutex_); CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_); }; // Sanitize db options. The caller should delete result.info_log if // it is not equal to src.info_log. 
Options SanitizeOptions(const std::string& db, const InternalKeyComparator* icmp, const InternalFilterPolicy* ipolicy, const Options& src); } // namespace leveldb #endif // STORAGE_LEVELDB_DB_DB_IMPL_H_
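DBImpl instances are created through the public DB::Open factory rather than constructed directly. A short usage sketch against the public interface (the database path and keys are illustrative):

#include <cassert>
#include <string>

#include "leveldb/db.h"

void BasicDBUsage() {
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/exampledb", &db);
  assert(s.ok());

  // Writes flow through DBImpl::Put/Write into the log file and memtable.
  s = db->Put(leveldb::WriteOptions(), "key1", "value1");
  assert(s.ok());

  std::string value;
  s = db->Get(leveldb::ReadOptions(), "key1", &value);  // checks mem_, imm_, tables
  assert(s.ok() && value == "value1");

  delete db;  // DBImpl's destructor releases the DB lock and open files
}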
bitcoin/src/leveldb/db/skiplist_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/skiplist.h" #include <atomic> #include <set> #include "leveldb/env.h" #include "port/port.h" #include "port/thread_annotations.h" #include "util/arena.h" #include "util/hash.h" #include "util/random.h" #include "util/testharness.h" namespace leveldb { typedef uint64_t Key; struct Comparator { int operator()(const Key& a, const Key& b) const { if (a < b) { return -1; } else if (a > b) { return +1; } else { return 0; } } }; class SkipTest {}; TEST(SkipTest, Empty) { Arena arena; Comparator cmp; SkipList<Key, Comparator> list(cmp, &arena); ASSERT_TRUE(!list.Contains(10)); SkipList<Key, Comparator>::Iterator iter(&list); ASSERT_TRUE(!iter.Valid()); iter.SeekToFirst(); ASSERT_TRUE(!iter.Valid()); iter.Seek(100); ASSERT_TRUE(!iter.Valid()); iter.SeekToLast(); ASSERT_TRUE(!iter.Valid()); } TEST(SkipTest, InsertAndLookup) { const int N = 2000; const int R = 5000; Random rnd(1000); std::set<Key> keys; Arena arena; Comparator cmp; SkipList<Key, Comparator> list(cmp, &arena); for (int i = 0; i < N; i++) { Key key = rnd.Next() % R; if (keys.insert(key).second) { list.Insert(key); } } for (int i = 0; i < R; i++) { if (list.Contains(i)) { ASSERT_EQ(keys.count(i), 1); } else { ASSERT_EQ(keys.count(i), 0); } } // Simple iterator tests { SkipList<Key, Comparator>::Iterator iter(&list); ASSERT_TRUE(!iter.Valid()); iter.Seek(0); ASSERT_TRUE(iter.Valid()); ASSERT_EQ(*(keys.begin()), iter.key()); iter.SeekToFirst(); ASSERT_TRUE(iter.Valid()); ASSERT_EQ(*(keys.begin()), iter.key()); iter.SeekToLast(); ASSERT_TRUE(iter.Valid()); ASSERT_EQ(*(keys.rbegin()), iter.key()); } // Forward iteration test for (int i = 0; i < R; i++) { SkipList<Key, Comparator>::Iterator iter(&list); iter.Seek(i); // Compare against model iterator std::set<Key>::iterator model_iter = keys.lower_bound(i); for (int j = 0; j < 3; j++) { if (model_iter == keys.end()) { ASSERT_TRUE(!iter.Valid()); break; } else { ASSERT_TRUE(iter.Valid()); ASSERT_EQ(*model_iter, iter.key()); ++model_iter; iter.Next(); } } } // Backward iteration test { SkipList<Key, Comparator>::Iterator iter(&list); iter.SeekToLast(); // Compare against model iterator for (std::set<Key>::reverse_iterator model_iter = keys.rbegin(); model_iter != keys.rend(); ++model_iter) { ASSERT_TRUE(iter.Valid()); ASSERT_EQ(*model_iter, iter.key()); iter.Prev(); } ASSERT_TRUE(!iter.Valid()); } } // We want to make sure that with a single writer and multiple // concurrent readers (with no synchronization other than when a // reader's iterator is created), the reader always observes all the // data that was present in the skip list when the iterator was // constructed. Because insertions are happening concurrently, we may // also observe new values that were inserted since the iterator was // constructed, but we should never miss any values that were present // at iterator construction time. // // We generate multi-part keys: // <key,gen,hash> // where: // key is in range [0..K-1] // gen is a generation number for key // hash is hash(key,gen) // // The insertion code picks a random key, sets gen to be 1 + the last // generation number inserted for that key, and sets hash to Hash(key,gen). // // At the beginning of a read, we snapshot the last inserted // generation number for each key. We then iterate, including random // calls to Next() and Seek(). 
For every key we encounter, we // check that it is either expected given the initial snapshot or has // been concurrently added since the iterator started. class ConcurrentTest { private: static const uint32_t K = 4; static uint64_t key(Key key) { return (key >> 40); } static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; } static uint64_t hash(Key key) { return key & 0xff; } static uint64_t HashNumbers(uint64_t k, uint64_t g) { uint64_t data[2] = {k, g}; return Hash(reinterpret_cast<char*>(data), sizeof(data), 0); } static Key MakeKey(uint64_t k, uint64_t g) { static_assert(sizeof(Key) == sizeof(uint64_t), ""); assert(k <= K); // We sometimes pass K to seek to the end of the skiplist assert(g <= 0xffffffffu); return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff)); } static bool IsValidKey(Key k) { return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff); } static Key RandomTarget(Random* rnd) { switch (rnd->Next() % 10) { case 0: // Seek to beginning return MakeKey(0, 0); case 1: // Seek to end return MakeKey(K, 0); default: // Seek to middle return MakeKey(rnd->Next() % K, 0); } } // Per-key generation struct State { std::atomic<int> generation[K]; void Set(int k, int v) { generation[k].store(v, std::memory_order_release); } int Get(int k) { return generation[k].load(std::memory_order_acquire); } State() { for (int k = 0; k < K; k++) { Set(k, 0); } } }; // Current state of the test State current_; Arena arena_; // SkipList is not protected by mu_. We just use a single writer // thread to modify it. SkipList<Key, Comparator> list_; public: ConcurrentTest() : list_(Comparator(), &arena_) {} // REQUIRES: External synchronization void WriteStep(Random* rnd) { const uint32_t k = rnd->Next() % K; const intptr_t g = current_.Get(k) + 1; const Key key = MakeKey(k, g); list_.Insert(key); current_.Set(k, g); } void ReadStep(Random* rnd) { // Remember the initial committed state of the skiplist. State initial_state; for (int k = 0; k < K; k++) { initial_state.Set(k, current_.Get(k)); } Key pos = RandomTarget(rnd); SkipList<Key, Comparator>::Iterator iter(&list_); iter.Seek(pos); while (true) { Key current; if (!iter.Valid()) { current = MakeKey(K, 0); } else { current = iter.key(); ASSERT_TRUE(IsValidKey(current)) << current; } ASSERT_LE(pos, current) << "should not go backwards"; // Verify that everything in [pos,current) was not present in // initial_state. while (pos < current) { ASSERT_LT(key(pos), K) << pos; // Note that generation 0 is never inserted, so it is ok if // <*,0,*> is missing. ASSERT_TRUE((gen(pos) == 0) || (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))) << "key: " << key(pos) << "; gen: " << gen(pos) << "; initgen: " << initial_state.Get(key(pos)); // Advance to next key in the valid key space if (key(pos) < key(current)) { pos = MakeKey(key(pos) + 1, 0); } else { pos = MakeKey(key(pos), gen(pos) + 1); } } if (!iter.Valid()) { break; } if (rnd->Next() % 2) { iter.Next(); pos = MakeKey(key(pos), gen(pos) + 1); } else { Key new_target = RandomTarget(rnd); if (new_target > pos) { pos = new_target; iter.Seek(new_target); } } } } }; const uint32_t ConcurrentTest::K; // Simple test that does single-threaded testing of the ConcurrentTest // scaffolding. 
TEST(SkipTest, ConcurrentWithoutThreads) { ConcurrentTest test; Random rnd(test::RandomSeed()); for (int i = 0; i < 10000; i++) { test.ReadStep(&rnd); test.WriteStep(&rnd); } } class TestState { public: ConcurrentTest t_; int seed_; std::atomic<bool> quit_flag_; enum ReaderState { STARTING, RUNNING, DONE }; explicit TestState(int s) : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {} void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) { mu_.Lock(); while (state_ != s) { state_cv_.Wait(); } mu_.Unlock(); } void Change(ReaderState s) LOCKS_EXCLUDED(mu_) { mu_.Lock(); state_ = s; state_cv_.Signal(); mu_.Unlock(); } private: port::Mutex mu_; ReaderState state_ GUARDED_BY(mu_); port::CondVar state_cv_ GUARDED_BY(mu_); }; static void ConcurrentReader(void* arg) { TestState* state = reinterpret_cast<TestState*>(arg); Random rnd(state->seed_); int64_t reads = 0; state->Change(TestState::RUNNING); while (!state->quit_flag_.load(std::memory_order_acquire)) { state->t_.ReadStep(&rnd); ++reads; } state->Change(TestState::DONE); } static void RunConcurrent(int run) { const int seed = test::RandomSeed() + (run * 100); Random rnd(seed); const int N = 1000; const int kSize = 1000; for (int i = 0; i < N; i++) { if ((i % 100) == 0) { fprintf(stderr, "Run %d of %d\n", i, N); } TestState state(seed + 1); Env::Default()->Schedule(ConcurrentReader, &state); state.Wait(TestState::RUNNING); for (int i = 0; i < kSize; i++) { state.t_.WriteStep(&rnd); } state.quit_flag_.store(true, std::memory_order_release); state.Wait(TestState::DONE); } } TEST(SkipTest, Concurrent1) { RunConcurrent(1); } TEST(SkipTest, Concurrent2) { RunConcurrent(2); } TEST(SkipTest, Concurrent3) { RunConcurrent(3); } TEST(SkipTest, Concurrent4) { RunConcurrent(4); } TEST(SkipTest, Concurrent5) { RunConcurrent(5); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
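For reference, a minimal single-threaded sketch of the SkipList API these tests exercise, reusing the Key/Comparator definitions at the top of this file; it assumes placement in the same namespace and that <cassert> is available.

void SkipListSketch() {
  Arena arena;
  Comparator cmp;
  SkipList<Key, Comparator> list(cmp, &arena);
  list.Insert(1);
  list.Insert(3);

  SkipList<Key, Comparator>::Iterator iter(&list);
  iter.Seek(2);                        // lands on the first key >= 2
  assert(iter.Valid() && iter.key() == 3);
  iter.SeekToFirst();
  assert(iter.Valid() && iter.key() == 1);
  iter.Next();
  assert(iter.key() == 3);
  iter.Next();
  assert(!iter.Valid());               // stepped past the last key
}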
bitcoin/src/leveldb/db/db_iter.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_DB_DB_ITER_H_ #define STORAGE_LEVELDB_DB_DB_ITER_H_ #include <stdint.h> #include "db/dbformat.h" #include "leveldb/db.h" namespace leveldb { class DBImpl; // Return a new iterator that converts internal keys (yielded by // "*internal_iter") that were live at the specified "sequence" number // into appropriate user keys. Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator, Iterator* internal_iter, SequenceNumber sequence, uint32_t seed); } // namespace leveldb #endif // STORAGE_LEVELDB_DB_DB_ITER_H_
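NewDBIterator is internal plumbing; applications reach it through DB::NewIterator, which wraps an internal-key iterator at the snapshot's sequence number. A hedged sketch of a forward scan over an already-open database:

#include <cassert>

#include "leveldb/db.h"

void ScanAll(leveldb::DB* db) {
  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // key()/value() yield user keys; sequence numbers and deletion markers
    // have already been resolved by the DBIter layer.
    leveldb::Slice key = it->key();
    leveldb::Slice value = it->value();
    (void)key;
    (void)value;
  }
  assert(it->status().ok());  // surface any corruption hit during the scan
  delete it;
}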
bitcoin/src/leveldb/db/corruption_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <sys/types.h> #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" #include "db/version_set.h" #include "leveldb/cache.h" #include "leveldb/db.h" #include "leveldb/table.h" #include "leveldb/write_batch.h" #include "util/logging.h" #include "util/testharness.h" #include "util/testutil.h" namespace leveldb { static const int kValueSize = 1000; class CorruptionTest { public: CorruptionTest() : db_(nullptr), dbname_("/memenv/corruption_test"), tiny_cache_(NewLRUCache(100)) { options_.env = &env_; options_.block_cache = tiny_cache_; DestroyDB(dbname_, options_); options_.create_if_missing = true; Reopen(); options_.create_if_missing = false; } ~CorruptionTest() { delete db_; delete tiny_cache_; } Status TryReopen() { delete db_; db_ = nullptr; return DB::Open(options_, dbname_, &db_); } void Reopen() { ASSERT_OK(TryReopen()); } void RepairDB() { delete db_; db_ = nullptr; ASSERT_OK(::leveldb::RepairDB(dbname_, options_)); } void Build(int n) { std::string key_space, value_space; WriteBatch batch; for (int i = 0; i < n; i++) { // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n); Slice key = Key(i, &key_space); batch.Clear(); batch.Put(key, Value(i, &value_space)); WriteOptions options; // Corrupt() doesn't work without this sync on windows; stat reports 0 for // the file size. if (i == n - 1) { options.sync = true; } ASSERT_OK(db_->Write(options, &batch)); } } void Check(int min_expected, int max_expected) { int next_expected = 0; int missed = 0; int bad_keys = 0; int bad_values = 0; int correct = 0; std::string value_space; Iterator* iter = db_->NewIterator(ReadOptions()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { uint64_t key; Slice in(iter->key()); if (in == "" || in == "~") { // Ignore boundary keys. 
continue; } if (!ConsumeDecimalNumber(&in, &key) || !in.empty() || key < next_expected) { bad_keys++; continue; } missed += (key - next_expected); next_expected = key + 1; if (iter->value() != Value(key, &value_space)) { bad_values++; } else { correct++; } } delete iter; fprintf(stderr, "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n", min_expected, max_expected, correct, bad_keys, bad_values, missed); ASSERT_LE(min_expected, correct); ASSERT_GE(max_expected, correct); } void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) { // Pick file to corrupt std::vector<std::string> filenames; ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames)); uint64_t number; FileType type; std::string fname; int picked_number = -1; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type == filetype && int(number) > picked_number) { // Pick latest file fname = dbname_ + "/" + filenames[i]; picked_number = number; } } ASSERT_TRUE(!fname.empty()) << filetype; uint64_t file_size; ASSERT_OK(env_.target()->GetFileSize(fname, &file_size)); if (offset < 0) { // Relative to end of file; make it absolute if (-offset > file_size) { offset = 0; } else { offset = file_size + offset; } } if (offset > file_size) { offset = file_size; } if (offset + bytes_to_corrupt > file_size) { bytes_to_corrupt = file_size - offset; } // Do it std::string contents; Status s = ReadFileToString(env_.target(), fname, &contents); ASSERT_TRUE(s.ok()) << s.ToString(); for (int i = 0; i < bytes_to_corrupt; i++) { contents[i + offset] ^= 0x80; } s = WriteStringToFile(env_.target(), contents, fname); ASSERT_TRUE(s.ok()) << s.ToString(); } int Property(const std::string& name) { std::string property; int result; if (db_->GetProperty(name, &property) && sscanf(property.c_str(), "%d", &result) == 1) { return result; } else { return -1; } } // Return the ith key Slice Key(int i, std::string* storage) { char buf[100]; snprintf(buf, sizeof(buf), "%016d", i); storage->assign(buf, strlen(buf)); return Slice(*storage); } // Return the value to associate with the specified key Slice Value(int k, std::string* storage) { Random r(k); return test::RandomString(&r, kValueSize, storage); } test::ErrorEnv env_; Options options_; DB* db_; private: std::string dbname_; Cache* tiny_cache_; }; TEST(CorruptionTest, Recovery) { Build(100); Check(100, 100); Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block Reopen(); // The 64 records in the first two log blocks are completely lost. 
Check(36, 36); } TEST(CorruptionTest, RecoverWriteError) { env_.writable_file_error_ = true; Status s = TryReopen(); ASSERT_TRUE(!s.ok()); } TEST(CorruptionTest, NewFileErrorDuringWrite) { // Do enough writing to force minor compaction env_.writable_file_error_ = true; const int num = 3 + (Options().write_buffer_size / kValueSize); std::string value_storage; Status s; for (int i = 0; s.ok() && i < num; i++) { WriteBatch batch; batch.Put("a", Value(100, &value_storage)); s = db_->Write(WriteOptions(), &batch); } ASSERT_TRUE(!s.ok()); ASSERT_GE(env_.num_writable_file_errors_, 1); env_.writable_file_error_ = false; Reopen(); } TEST(CorruptionTest, TableFile) { Build(100); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); dbi->TEST_CompactRange(0, nullptr, nullptr); dbi->TEST_CompactRange(1, nullptr, nullptr); Corrupt(kTableFile, 100, 1); Check(90, 99); } TEST(CorruptionTest, TableFileRepair) { options_.block_size = 2 * kValueSize; // Limit scope of corruption options_.paranoid_checks = true; Reopen(); Build(100); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); dbi->TEST_CompactRange(0, nullptr, nullptr); dbi->TEST_CompactRange(1, nullptr, nullptr); Corrupt(kTableFile, 100, 1); RepairDB(); Reopen(); Check(95, 99); } TEST(CorruptionTest, TableFileIndexData) { Build(10000); // Enough to build multiple Tables DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); Corrupt(kTableFile, -2000, 500); Reopen(); Check(5000, 9999); } TEST(CorruptionTest, MissingDescriptor) { Build(1000); RepairDB(); Reopen(); Check(1000, 1000); } TEST(CorruptionTest, SequenceNumberRecovery) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5")); RepairDB(); Reopen(); std::string v; ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("v5", v); // Write something. If sequence number was not recovered properly, // it will be hidden by an earlier write. ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6")); ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("v6", v); Reopen(); ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("v6", v); } TEST(CorruptionTest, CorruptedDescriptor) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); dbi->TEST_CompactRange(0, nullptr, nullptr); Corrupt(kDescriptorFile, 0, 1000); Status s = TryReopen(); ASSERT_TRUE(!s.ok()); RepairDB(); Reopen(); std::string v; ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("hello", v); } TEST(CorruptionTest, CompactionInputError) { Build(10); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); const int last = config::kMaxMemCompactLevel; ASSERT_EQ(1, Property("leveldb.num-files-at-level" + NumberToString(last))); Corrupt(kTableFile, 100, 1); Check(5, 9); // Force compactions by writing lots of values Build(10000); Check(10000, 10000); } TEST(CorruptionTest, CompactionInputErrorParanoid) { options_.paranoid_checks = true; options_.write_buffer_size = 512 << 10; Reopen(); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); // Make multiple inputs so we need to compact. 
for (int i = 0; i < 2; i++) { Build(10); dbi->TEST_CompactMemTable(); Corrupt(kTableFile, 100, 1); env_.SleepForMicroseconds(100000); } dbi->CompactRange(nullptr, nullptr); // Write must fail because of corrupted table std::string tmp1, tmp2; Status s = db_->Put(WriteOptions(), Key(5, &tmp1), Value(5, &tmp2)); ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db"; } TEST(CorruptionTest, UnrelatedKeys) { Build(10); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); Corrupt(kTableFile, 100, 1); std::string tmp1, tmp2; ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2))); std::string v; ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); ASSERT_EQ(Value(1000, &tmp2).ToString(), v); dbi->TEST_CompactMemTable(); ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); ASSERT_EQ(Value(1000, &tmp2).ToString(), v); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
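The corruption primitive these tests rely on reduces to flipping the high bit of each byte in a range, which reliably breaks CRCs without changing record lengths. A standalone sketch of that transform (file I/O elided):

#include <string>

void FlipHighBits(std::string* contents, size_t offset, size_t n) {
  for (size_t i = 0; i < n; i++) {
    (*contents)[offset + i] ^= 0x80;  // same transform Corrupt() applies
  }
}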
bitcoin/src/leveldb/db/dbformat.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/dbformat.h" #include <stdio.h> #include <sstream> #include "port/port.h" #include "util/coding.h" namespace leveldb { static uint64_t PackSequenceAndType(uint64_t seq, ValueType t) { assert(seq <= kMaxSequenceNumber); assert(t <= kValueTypeForSeek); return (seq << 8) | t; } void AppendInternalKey(std::string* result, const ParsedInternalKey& key) { result->append(key.user_key.data(), key.user_key.size()); PutFixed64(result, PackSequenceAndType(key.sequence, key.type)); } std::string ParsedInternalKey::DebugString() const { std::ostringstream ss; ss << '\'' << EscapeString(user_key.ToString()) << "' @ " << sequence << " : " << static_cast<int>(type); return ss.str(); } std::string InternalKey::DebugString() const { ParsedInternalKey parsed; if (ParseInternalKey(rep_, &parsed)) { return parsed.DebugString(); } std::ostringstream ss; ss << "(bad)" << EscapeString(rep_); return ss.str(); } const char* InternalKeyComparator::Name() const { return "leveldb.InternalKeyComparator"; } int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const { // Order by: // increasing user key (according to user-supplied comparator) // decreasing sequence number // decreasing type (though sequence# should be enough to disambiguate) int r = user_comparator_->Compare(ExtractUserKey(akey), ExtractUserKey(bkey)); if (r == 0) { const uint64_t anum = DecodeFixed64(akey.data() + akey.size() - 8); const uint64_t bnum = DecodeFixed64(bkey.data() + bkey.size() - 8); if (anum > bnum) { r = -1; } else if (anum < bnum) { r = +1; } } return r; } void InternalKeyComparator::FindShortestSeparator(std::string* start, const Slice& limit) const { // Attempt to shorten the user portion of the key Slice user_start = ExtractUserKey(*start); Slice user_limit = ExtractUserKey(limit); std::string tmp(user_start.data(), user_start.size()); user_comparator_->FindShortestSeparator(&tmp, user_limit); if (tmp.size() < user_start.size() && user_comparator_->Compare(user_start, tmp) < 0) { // User key has become shorter physically, but larger logically. // Tack on the earliest possible number to the shortened user key. PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek)); assert(this->Compare(*start, tmp) < 0); assert(this->Compare(tmp, limit) < 0); start->swap(tmp); } } void InternalKeyComparator::FindShortSuccessor(std::string* key) const { Slice user_key = ExtractUserKey(*key); std::string tmp(user_key.data(), user_key.size()); user_comparator_->FindShortSuccessor(&tmp); if (tmp.size() < user_key.size() && user_comparator_->Compare(user_key, tmp) < 0) { // User key has become shorter physically, but larger logically. // Tack on the earliest possible number to the shortened user key. PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek)); assert(this->Compare(*key, tmp) < 0); key->swap(tmp); } } const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); } void InternalFilterPolicy::CreateFilter(const Slice* keys, int n, std::string* dst) const { // We rely on the fact that the code in table.cc does not mind us // adjusting keys[]. Slice* mkey = const_cast<Slice*>(keys); for (int i = 0; i < n; i++) { mkey[i] = ExtractUserKey(keys[i]); // TODO(sanjay): Suppress dups? 
} user_policy_->CreateFilter(keys, n, dst); } bool InternalFilterPolicy::KeyMayMatch(const Slice& key, const Slice& f) const { return user_policy_->KeyMayMatch(ExtractUserKey(key), f); } LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) { size_t usize = user_key.size(); size_t needed = usize + 13; // A conservative estimate char* dst; if (needed <= sizeof(space_)) { dst = space_; } else { dst = new char[needed]; } start_ = dst; dst = EncodeVarint32(dst, usize + 8); kstart_ = dst; memcpy(dst, user_key.data(), usize); dst += usize; EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek)); dst += 8; end_ = dst; } } // namespace leveldb
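The internal-key trailer packs the 56-bit sequence number and the 8-bit value type into a single fixed64, exactly as PackSequenceAndType does above. A standalone copy of the forward and inverse transforms, for illustration:

#include <cstdint>

uint64_t Pack(uint64_t seq, uint8_t type) {
  // Valid only for seq <= kMaxSequenceNumber (2^56 - 1).
  return (seq << 8) | type;
}

uint64_t SequenceOf(uint64_t packed) { return packed >> 8; }
uint8_t TypeOf(uint64_t packed) { return packed & 0xff; }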
bitcoin/src/leveldb/db/filename_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/filename.h" #include "db/dbformat.h" #include "port/port.h" #include "util/logging.h" #include "util/testharness.h" namespace leveldb { class FileNameTest {}; TEST(FileNameTest, Parse) { Slice db; FileType type; uint64_t number; // Successful parses static struct { const char* fname; uint64_t number; FileType type; } cases[] = { {"100.log", 100, kLogFile}, {"0.log", 0, kLogFile}, {"0.sst", 0, kTableFile}, {"0.ldb", 0, kTableFile}, {"CURRENT", 0, kCurrentFile}, {"LOCK", 0, kDBLockFile}, {"MANIFEST-2", 2, kDescriptorFile}, {"MANIFEST-7", 7, kDescriptorFile}, {"LOG", 0, kInfoLogFile}, {"LOG.old", 0, kInfoLogFile}, {"18446744073709551615.log", 18446744073709551615ull, kLogFile}, }; for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) { std::string f = cases[i].fname; ASSERT_TRUE(ParseFileName(f, &number, &type)) << f; ASSERT_EQ(cases[i].type, type) << f; ASSERT_EQ(cases[i].number, number) << f; } // Errors static const char* errors[] = {"", "foo", "foo-dx-100.log", ".log", "", "manifest", "CURREN", "CURRENTX", "MANIFES", "MANIFEST", "MANIFEST-", "XMANIFEST-3", "MANIFEST-3x", "LOC", "LOCKx", "LO", "LOGx", "18446744073709551616.log", "184467440737095516150.log", "100", "100.", "100.lop"}; for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) { std::string f = errors[i]; ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f; } } TEST(FileNameTest, Construction) { uint64_t number; FileType type; std::string fname; fname = CurrentFileName("foo"); ASSERT_EQ("foo/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(0, number); ASSERT_EQ(kCurrentFile, type); fname = LockFileName("foo"); ASSERT_EQ("foo/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(0, number); ASSERT_EQ(kDBLockFile, type); fname = LogFileName("foo", 192); ASSERT_EQ("foo/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(192, number); ASSERT_EQ(kLogFile, type); fname = TableFileName("bar", 200); ASSERT_EQ("bar/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(200, number); ASSERT_EQ(kTableFile, type); fname = DescriptorFileName("bar", 100); ASSERT_EQ("bar/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(100, number); ASSERT_EQ(kDescriptorFile, type); fname = TempFileName("tmp", 999); ASSERT_EQ("tmp/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(999, number); ASSERT_EQ(kTempFile, type); fname = InfoLogFileName("foo"); ASSERT_EQ("foo/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(0, number); ASSERT_EQ(kInfoLogFile, type); fname = OldInfoLogFileName("foo"); ASSERT_EQ("foo/", std::string(fname.data(), 4)); ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); ASSERT_EQ(0, number); ASSERT_EQ(kInfoLogFile, type); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
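A small sketch of the naming scheme exercised above: the construction helpers append a formatted number and suffix under the db directory, and ParseFileName inverts them given the bare file name (the directory prefix must be stripped first). The path "mydb" is illustrative:

#include <cassert>
#include <string>

#include "db/filename.h"

void FileNameSketch() {
  std::string fname = leveldb::TableFileName("mydb", 5);  // e.g. "mydb/000005.ldb"
  uint64_t number;
  leveldb::FileType type;
  bool ok = leveldb::ParseFileName(fname.substr(5), &number, &type);
  assert(ok && number == 5 && type == leveldb::kTableFile);
}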
bitcoin/src/leveldb/db/memtable.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_DB_MEMTABLE_H_ #define STORAGE_LEVELDB_DB_MEMTABLE_H_ #include <string> #include "db/dbformat.h" #include "db/skiplist.h" #include "leveldb/db.h" #include "util/arena.h" namespace leveldb { class InternalKeyComparator; class MemTableIterator; class MemTable { public: // MemTables are reference counted. The initial reference count // is zero and the caller must call Ref() at least once. explicit MemTable(const InternalKeyComparator& comparator); MemTable(const MemTable&) = delete; MemTable& operator=(const MemTable&) = delete; // Increase reference count. void Ref() { ++refs_; } // Drop reference count. Delete if no more references exist. void Unref() { --refs_; assert(refs_ >= 0); if (refs_ <= 0) { delete this; } } // Returns an estimate of the number of bytes of data in use by this // data structure. It is safe to call when MemTable is being modified. size_t ApproximateMemoryUsage(); // Return an iterator that yields the contents of the memtable. // // The caller must ensure that the underlying MemTable remains live // while the returned iterator is live. The keys returned by this // iterator are internal keys encoded by AppendInternalKey in the // db/format.{h,cc} module. Iterator* NewIterator(); // Add an entry into memtable that maps key to value at the // specified sequence number and with the specified type. // Typically value will be empty if type==kTypeDeletion. void Add(SequenceNumber seq, ValueType type, const Slice& key, const Slice& value); // If memtable contains a value for key, store it in *value and return true. // If memtable contains a deletion for key, store a NotFound() error // in *status and return true. // Else, return false. bool Get(const LookupKey& key, std::string* value, Status* s); private: friend class MemTableIterator; friend class MemTableBackwardIterator; struct KeyComparator { const InternalKeyComparator comparator; explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {} int operator()(const char* a, const char* b) const; }; typedef SkipList<const char*, KeyComparator> Table; ~MemTable(); // Private since only Unref() should be used to delete it KeyComparator comparator_; int refs_; Arena arena_; Table table_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_DB_MEMTABLE_H_
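A hedged sketch of the lifecycle described above: the caller takes a reference, Add() tags each entry with a sequence number and value type, and Get() takes a LookupKey built from the user key plus the snapshot sequence to read at. The sequence numbers are invented:

#include <string>

#include "db/memtable.h"

void MemTableSketch(const leveldb::InternalKeyComparator& cmp) {
  leveldb::MemTable* mem = new leveldb::MemTable(cmp);
  mem->Ref();  // refcount starts at zero; callers must Ref() at least once

  mem->Add(1 /*seq*/, leveldb::kTypeValue, "key", "value");

  std::string value;
  leveldb::Status s;
  leveldb::LookupKey lkey("key", 2 /*visible at snapshot seq 2*/);
  if (mem->Get(lkey, &value, &s)) {
    // Found: value holds "value". A kTypeDeletion entry would instead
    // set s to NotFound and still return true.
  }
  mem->Unref();  // drops the count to zero, deleting the memtable
}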
bitcoin/src/leveldb/db/version_edit.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_DB_VERSION_EDIT_H_ #define STORAGE_LEVELDB_DB_VERSION_EDIT_H_ #include <set> #include <utility> #include <vector> #include "db/dbformat.h" namespace leveldb { class VersionSet; struct FileMetaData { FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {} int refs; int allowed_seeks; // Seeks allowed until compaction uint64_t number; uint64_t file_size; // File size in bytes InternalKey smallest; // Smallest internal key served by table InternalKey largest; // Largest internal key served by table }; class VersionEdit { public: VersionEdit() { Clear(); } ~VersionEdit() = default; void Clear(); void SetComparatorName(const Slice& name) { has_comparator_ = true; comparator_ = name.ToString(); } void SetLogNumber(uint64_t num) { has_log_number_ = true; log_number_ = num; } void SetPrevLogNumber(uint64_t num) { has_prev_log_number_ = true; prev_log_number_ = num; } void SetNextFile(uint64_t num) { has_next_file_number_ = true; next_file_number_ = num; } void SetLastSequence(SequenceNumber seq) { has_last_sequence_ = true; last_sequence_ = seq; } void SetCompactPointer(int level, const InternalKey& key) { compact_pointers_.push_back(std::make_pair(level, key)); } // Add the specified file at the specified number. // REQUIRES: This version has not been saved (see VersionSet::SaveTo) // REQUIRES: "smallest" and "largest" are smallest and largest keys in file void AddFile(int level, uint64_t file, uint64_t file_size, const InternalKey& smallest, const InternalKey& largest) { FileMetaData f; f.number = file; f.file_size = file_size; f.smallest = smallest; f.largest = largest; new_files_.push_back(std::make_pair(level, f)); } // Delete the specified "file" from the specified "level". void DeleteFile(int level, uint64_t file) { deleted_files_.insert(std::make_pair(level, file)); } void EncodeTo(std::string* dst) const; Status DecodeFrom(const Slice& src); std::string DebugString() const; private: friend class VersionSet; typedef std::set<std::pair<int, uint64_t>> DeletedFileSet; std::string comparator_; uint64_t log_number_; uint64_t prev_log_number_; uint64_t next_file_number_; SequenceNumber last_sequence_; bool has_comparator_; bool has_log_number_; bool has_prev_log_number_; bool has_next_file_number_; bool has_last_sequence_; std::vector<std::pair<int, InternalKey>> compact_pointers_; DeletedFileSet deleted_files_; std::vector<std::pair<int, FileMetaData>> new_files_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_DB_VERSION_EDIT_H_
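As a usage note, a compaction installs its result as one atomic edit: every input file becomes a DeleteFile entry and every output an AddFile entry. A sketch with invented levels and file numbers (the smallest/largest keys would come from the produced table):

#include "db/version_edit.h"

void RecordCompaction(leveldb::VersionEdit* edit,
                      const leveldb::InternalKey& smallest,
                      const leveldb::InternalKey& largest) {
  edit->DeleteFile(1, 11);  // input consumed from level 1
  edit->DeleteFile(2, 12);  // overlapping input from level 2
  edit->AddFile(2, 13, 4096 /*file_size*/, smallest, largest);
}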
bitcoin/src/leveldb/db/version_set.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // The representation of a DBImpl consists of a set of Versions. The // newest version is called "current". Older versions may be kept // around to provide a consistent view to live iterators. // // Each Version keeps track of a set of Table files per level. The // entire set of versions is maintained in a VersionSet. // // Version,VersionSet are thread-compatible, but require external // synchronization on all accesses. #ifndef STORAGE_LEVELDB_DB_VERSION_SET_H_ #define STORAGE_LEVELDB_DB_VERSION_SET_H_ #include <map> #include <set> #include <vector> #include "db/dbformat.h" #include "db/version_edit.h" #include "port/port.h" #include "port/thread_annotations.h" namespace leveldb { namespace log { class Writer; } class Compaction; class Iterator; class MemTable; class TableBuilder; class TableCache; class Version; class VersionSet; class WritableFile; // Return the smallest index i such that files[i]->largest >= key. // Return files.size() if there is no such file. // REQUIRES: "files" contains a sorted list of non-overlapping files. int FindFile(const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& files, const Slice& key); // Returns true iff some file in "files" overlaps the user key range // [*smallest,*largest]. // smallest==nullptr represents a key smaller than all keys in the DB. // largest==nullptr represents a key largest than all keys in the DB. // REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges // in sorted order. bool SomeFileOverlapsRange(const InternalKeyComparator& icmp, bool disjoint_sorted_files, const std::vector<FileMetaData*>& files, const Slice* smallest_user_key, const Slice* largest_user_key); class Version { public: // Lookup the value for key. If found, store it in *val and // return OK. Else return a non-OK status. Fills *stats. // REQUIRES: lock is not held struct GetStats { FileMetaData* seek_file; int seek_file_level; }; // Append to *iters a sequence of iterators that will // yield the contents of this Version when merged together. // REQUIRES: This version has been saved (see VersionSet::SaveTo) void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters); Status Get(const ReadOptions&, const LookupKey& key, std::string* val, GetStats* stats); // Adds "stats" into the current state. Returns true if a new // compaction may need to be triggered, false otherwise. // REQUIRES: lock is held bool UpdateStats(const GetStats& stats); // Record a sample of bytes read at the specified internal key. // Samples are taken approximately once every config::kReadBytesPeriod // bytes. Returns true if a new compaction may need to be triggered. // REQUIRES: lock is held bool RecordReadSample(Slice key); // Reference count management (so Versions do not disappear out from // under live iterators) void Ref(); void Unref(); void GetOverlappingInputs( int level, const InternalKey* begin, // nullptr means before all keys const InternalKey* end, // nullptr means after all keys std::vector<FileMetaData*>* inputs); // Returns true iff some file in the specified level overlaps // some part of [*smallest_user_key,*largest_user_key]. // smallest_user_key==nullptr represents a key smaller than all the DB's keys. // largest_user_key==nullptr represents a key largest than all the DB's keys. 
bool OverlapInLevel(int level, const Slice* smallest_user_key, const Slice* largest_user_key); // Return the level at which we should place a new memtable compaction // result that covers the range [smallest_user_key,largest_user_key]. int PickLevelForMemTableOutput(const Slice& smallest_user_key, const Slice& largest_user_key); int NumFiles(int level) const { return files_[level].size(); } // Return a human readable string that describes this version's contents. std::string DebugString() const; private: friend class Compaction; friend class VersionSet; class LevelFileNumIterator; explicit Version(VersionSet* vset) : vset_(vset), next_(this), prev_(this), refs_(0), file_to_compact_(nullptr), file_to_compact_level_(-1), compaction_score_(-1), compaction_level_(-1) {} Version(const Version&) = delete; Version& operator=(const Version&) = delete; ~Version(); Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const; // Call func(arg, level, f) for every file that overlaps user_key in // order from newest to oldest. If an invocation of func returns // false, makes no more calls. // // REQUIRES: user portion of internal_key == user_key. void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg, bool (*func)(void*, int, FileMetaData*)); VersionSet* vset_; // VersionSet to which this Version belongs Version* next_; // Next version in linked list Version* prev_; // Previous version in linked list int refs_; // Number of live refs to this version // List of files per level std::vector<FileMetaData*> files_[config::kNumLevels]; // Next file to compact based on seek stats. FileMetaData* file_to_compact_; int file_to_compact_level_; // Level that should be compacted next and its compaction score. // Score < 1 means compaction is not strictly needed. These fields // are initialized by Finalize(). double compaction_score_; int compaction_level_; }; class VersionSet { public: VersionSet(const std::string& dbname, const Options* options, TableCache* table_cache, const InternalKeyComparator*); VersionSet(const VersionSet&) = delete; VersionSet& operator=(const VersionSet&) = delete; ~VersionSet(); // Apply *edit to the current version to form a new descriptor that // is both saved to persistent state and installed as the new // current version. Will release *mu while actually writing to the file. // REQUIRES: *mu is held on entry. // REQUIRES: no other thread concurrently calls LogAndApply() Status LogAndApply(VersionEdit* edit, port::Mutex* mu) EXCLUSIVE_LOCKS_REQUIRED(mu); // Recover the last saved descriptor from persistent storage. Status Recover(bool* save_manifest); // Return the current version. Version* current() const { return current_; } // Return the current manifest file number uint64_t ManifestFileNumber() const { return manifest_file_number_; } // Allocate and return a new file number uint64_t NewFileNumber() { return next_file_number_++; } // Arrange to reuse "file_number" unless a newer file number has // already been allocated. // REQUIRES: "file_number" was returned by a call to NewFileNumber(). void ReuseFileNumber(uint64_t file_number) { if (next_file_number_ == file_number + 1) { next_file_number_ = file_number; } } // Return the number of Table files at the specified level. int NumLevelFiles(int level) const; // Return the combined file size of all files at the specified level. int64_t NumLevelBytes(int level) const; // Return the last sequence number. uint64_t LastSequence() const { return last_sequence_; } // Set the last sequence number to s. 
void SetLastSequence(uint64_t s) { assert(s >= last_sequence_); last_sequence_ = s; } // Mark the specified file number as used. void MarkFileNumberUsed(uint64_t number); // Return the current log file number. uint64_t LogNumber() const { return log_number_; } // Return the log file number for the log file that is currently // being compacted, or zero if there is no such log file. uint64_t PrevLogNumber() const { return prev_log_number_; } // Pick level and inputs for a new compaction. // Returns nullptr if there is no compaction to be done. // Otherwise returns a pointer to a heap-allocated object that // describes the compaction. Caller should delete the result. Compaction* PickCompaction(); // Return a compaction object for compacting the range [begin,end] in // the specified level. Returns nullptr if there is nothing in that // level that overlaps the specified range. Caller should delete // the result. Compaction* CompactRange(int level, const InternalKey* begin, const InternalKey* end); // Return the maximum overlapping data (in bytes) at next level for any // file at a level >= 1. int64_t MaxNextLevelOverlappingBytes(); // Create an iterator that reads over the compaction inputs for "*c". // The caller should delete the iterator when no longer needed. Iterator* MakeInputIterator(Compaction* c); // Returns true iff some level needs a compaction. bool NeedsCompaction() const { Version* v = current_; return (v->compaction_score_ >= 1) || (v->file_to_compact_ != nullptr); } // Add all files listed in any live version to *live. // May also mutate some internal state. void AddLiveFiles(std::set<uint64_t>* live); // Return the approximate offset in the database of the data for // "key" as of version "v". uint64_t ApproximateOffsetOf(Version* v, const InternalKey& key); // Return a human-readable short (single-line) summary of the number // of files per level. Uses *scratch as backing store. struct LevelSummaryStorage { char buffer[100]; }; const char* LevelSummary(LevelSummaryStorage* scratch) const; private: class Builder; friend class Compaction; friend class Version; bool ReuseManifest(const std::string& dscname, const std::string& dscbase); void Finalize(Version* v); void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest, InternalKey* largest); void GetRange2(const std::vector<FileMetaData*>& inputs1, const std::vector<FileMetaData*>& inputs2, InternalKey* smallest, InternalKey* largest); void SetupOtherInputs(Compaction* c); // Save current contents to *log Status WriteSnapshot(log::Writer* log); void AppendVersion(Version* v); Env* const env_; const std::string dbname_; const Options* const options_; TableCache* const table_cache_; const InternalKeyComparator icmp_; uint64_t next_file_number_; uint64_t manifest_file_number_; uint64_t last_sequence_; uint64_t log_number_; uint64_t prev_log_number_; // 0 or backing store for memtable being compacted // Opened lazily WritableFile* descriptor_file_; log::Writer* descriptor_log_; Version dummy_versions_; // Head of circular doubly-linked list of versions. Version* current_; // == dummy_versions_.prev_ // Per-level key at which the next compaction at that level should start. // Either an empty string, or a valid InternalKey. std::string compact_pointer_[config::kNumLevels]; }; // A Compaction encapsulates information about a compaction. class Compaction { public: ~Compaction(); // Return the level that is being compacted. Inputs from "level" // and "level+1" will be merged to produce a set of "level+1" files. 
int level() const { return level_; } // Return the object that holds the edits to the descriptor done // by this compaction. VersionEdit* edit() { return &edit_; } // "which" must be either 0 or 1 int num_input_files(int which) const { return inputs_[which].size(); } // Return the ith input file at "level()+which" ("which" must be 0 or 1). FileMetaData* input(int which, int i) const { return inputs_[which][i]; } // Maximum size of files to build during this compaction. uint64_t MaxOutputFileSize() const { return max_output_file_size_; } // Is this a trivial compaction that can be implemented by just // moving a single input file to the next level (no merging or splitting) bool IsTrivialMove() const; // Add all inputs to this compaction as delete operations to *edit. void AddInputDeletions(VersionEdit* edit); // Returns true if the information we have available guarantees that // the compaction is producing data in "level+1" for which no data exists // in levels greater than "level+1". bool IsBaseLevelForKey(const Slice& user_key); // Returns true iff we should stop building the current output // before processing "internal_key". bool ShouldStopBefore(const Slice& internal_key); // Release the input version for the compaction, once the compaction // is successful. void ReleaseInputs(); private: friend class Version; friend class VersionSet; Compaction(const Options* options, int level); int level_; uint64_t max_output_file_size_; Version* input_version_; VersionEdit edit_; // Each compaction reads inputs from "level_" and "level_+1" std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs // State used to check for number of overlapping grandparent files // (parent == level_ + 1, grandparent == level_ + 2) std::vector<FileMetaData*> grandparents_; size_t grandparent_index_; // Index in grandparent_starts_ bool seen_key_; // Some output key has been seen int64_t overlapped_bytes_; // Bytes of overlap between current output // and grandparent files // State for implementing IsBaseLevelForKey // level_ptrs_ holds indices into input_version_->levels_: our state // is that we are positioned at one of the file ranges for each // higher level than the ones involved in this compaction (i.e. for // all L >= level_ + 2). size_t level_ptrs_[config::kNumLevels]; }; } // namespace leveldb #endif // STORAGE_LEVELDB_DB_VERSION_SET_H_
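FindFile, declared above, is a lower-bound binary search over a level's sorted, non-overlapping file list. A standalone sketch of the same search, reduced to plain integers for illustration:

#include <cstdint>
#include <vector>

// Returns the smallest index i such that largest_keys[i] >= key,
// or largest_keys.size() if every file ends before key.
size_t FindFileSketch(const std::vector<uint64_t>& largest_keys, uint64_t key) {
  size_t left = 0;
  size_t right = largest_keys.size();
  while (left < right) {
    size_t mid = (left + right) / 2;
    if (largest_keys[mid] < key) {
      left = mid + 1;  // files at or before mid end before key
    } else {
      right = mid;     // mid may contain key; keep the left half
    }
  }
  return left;
}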
bitcoin/src/leveldb/db/log_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/log_reader.h" #include "db/log_writer.h" #include "leveldb/env.h" #include "util/coding.h" #include "util/crc32c.h" #include "util/random.h" #include "util/testharness.h" namespace leveldb { namespace log { // Construct a string of the specified length made out of the supplied // partial string. static std::string BigString(const std::string& partial_string, size_t n) { std::string result; while (result.size() < n) { result.append(partial_string); } result.resize(n); return result; } // Construct a string from a number static std::string NumberString(int n) { char buf[50]; snprintf(buf, sizeof(buf), "%d.", n); return std::string(buf); } // Return a skewed potentially long string static std::string RandomSkewedString(int i, Random* rnd) { return BigString(NumberString(i), rnd->Skewed(17)); } class LogTest { public: LogTest() : reading_(false), writer_(new Writer(&dest_)), reader_(new Reader(&source_, &report_, true /*checksum*/, 0 /*initial_offset*/)) {} ~LogTest() { delete writer_; delete reader_; } void ReopenForAppend() { delete writer_; writer_ = new Writer(&dest_, dest_.contents_.size()); } void Write(const std::string& msg) { ASSERT_TRUE(!reading_) << "Write() after starting to read"; writer_->AddRecord(Slice(msg)); } size_t WrittenBytes() const { return dest_.contents_.size(); } std::string Read() { if (!reading_) { reading_ = true; source_.contents_ = Slice(dest_.contents_); } std::string scratch; Slice record; if (reader_->ReadRecord(&record, &scratch)) { return record.ToString(); } else { return "EOF"; } } void IncrementByte(int offset, int delta) { dest_.contents_[offset] += delta; } void SetByte(int offset, char new_byte) { dest_.contents_[offset] = new_byte; } void ShrinkSize(int bytes) { dest_.contents_.resize(dest_.contents_.size() - bytes); } void FixChecksum(int header_offset, int len) { // Compute crc of type/len/data uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len); crc = crc32c::Mask(crc); EncodeFixed32(&dest_.contents_[header_offset], crc); } void ForceError() { source_.force_error_ = true; } size_t DroppedBytes() const { return report_.dropped_bytes_; } std::string ReportMessage() const { return report_.message_; } // Returns OK iff recorded error message contains "msg" std::string MatchError(const std::string& msg) const { if (report_.message_.find(msg) == std::string::npos) { return report_.message_; } else { return "OK"; } } void WriteInitialOffsetLog() { for (int i = 0; i < num_initial_offset_records_; i++) { std::string record(initial_offset_record_sizes_[i], static_cast<char>('a' + i)); Write(record); } } void StartReadingAt(uint64_t initial_offset) { delete reader_; reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset); } void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) { WriteInitialOffsetLog(); reading_ = true; source_.contents_ = Slice(dest_.contents_); Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/, WrittenBytes() + offset_past_end); Slice record; std::string scratch; ASSERT_TRUE(!offset_reader->ReadRecord(&record, &scratch)); delete offset_reader; } void CheckInitialOffsetRecord(uint64_t initial_offset, int expected_record_offset) { WriteInitialOffsetLog(); reading_ = true; source_.contents_ = Slice(dest_.contents_); Reader* offset_reader = 
new Reader(&source_, &report_, true /*checksum*/, initial_offset); // Read all records from expected_record_offset through the last one. ASSERT_LT(expected_record_offset, num_initial_offset_records_); for (; expected_record_offset < num_initial_offset_records_; ++expected_record_offset) { Slice record; std::string scratch; ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch)); ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset], record.size()); ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset], offset_reader->LastRecordOffset()); ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]); } delete offset_reader; } private: class StringDest : public WritableFile { public: Status Close() override { return Status::OK(); } Status Flush() override { return Status::OK(); } Status Sync() override { return Status::OK(); } Status Append(const Slice& slice) override { contents_.append(slice.data(), slice.size()); return Status::OK(); } std::string GetName() const override { return ""; } std::string contents_; }; class StringSource : public SequentialFile { public: StringSource() : force_error_(false), returned_partial_(false) {} Status Read(size_t n, Slice* result, char* scratch) override { ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error"; if (force_error_) { force_error_ = false; returned_partial_ = true; return Status::Corruption("read error"); } if (contents_.size() < n) { n = contents_.size(); returned_partial_ = true; } *result = Slice(contents_.data(), n); contents_.remove_prefix(n); return Status::OK(); } Status Skip(uint64_t n) override { if (n > contents_.size()) { contents_.clear(); return Status::NotFound("in-memory file skipped past end"); } contents_.remove_prefix(n); return Status::OK(); } std::string GetName() const { return ""; } Slice contents_; bool force_error_; bool returned_partial_; }; class ReportCollector : public Reader::Reporter { public: ReportCollector() : dropped_bytes_(0) {} void Corruption(size_t bytes, const Status& status) override { dropped_bytes_ += bytes; message_.append(status.ToString()); } size_t dropped_bytes_; std::string message_; }; // Record metadata for testing initial offset functionality static size_t initial_offset_record_sizes_[]; static uint64_t initial_offset_last_record_offsets_[]; static int num_initial_offset_records_; StringDest dest_; StringSource source_; ReportCollector report_; bool reading_; Writer* writer_; Reader* reader_; }; size_t LogTest::initial_offset_record_sizes_[] = { 10000, // Two sizable records in first block 10000, 2 * log::kBlockSize - 1000, // Span three blocks 1, 13716, // Consume all but two bytes of block 3. log::kBlockSize - kHeaderSize, // Consume the entirety of block 4. }; uint64_t LogTest::initial_offset_last_record_offsets_[] = { 0, kHeaderSize + 10000, 2 * (kHeaderSize + 10000), 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, 2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize + kHeaderSize + 1, 3 * log::kBlockSize, }; // LogTest::initial_offset_last_record_offsets_ must be defined before this. 
int LogTest::num_initial_offset_records_ = sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t); TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); } TEST(LogTest, ReadWrite) { Write("foo"); Write("bar"); Write(""); Write("xxxx"); ASSERT_EQ("foo", Read()); ASSERT_EQ("bar", Read()); ASSERT_EQ("", Read()); ASSERT_EQ("xxxx", Read()); ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read()); // Make sure reads at eof work } TEST(LogTest, ManyBlocks) { for (int i = 0; i < 100000; i++) { Write(NumberString(i)); } for (int i = 0; i < 100000; i++) { ASSERT_EQ(NumberString(i), Read()); } ASSERT_EQ("EOF", Read()); } TEST(LogTest, Fragmentation) { Write("small"); Write(BigString("medium", 50000)); Write(BigString("large", 100000)); ASSERT_EQ("small", Read()); ASSERT_EQ(BigString("medium", 50000), Read()); ASSERT_EQ(BigString("large", 100000), Read()); ASSERT_EQ("EOF", Read()); } TEST(LogTest, MarginalTrailer) { // Make a trailer that is exactly the same length as an empty record. const int n = kBlockSize - 2 * kHeaderSize; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes()); Write(""); Write("bar"); ASSERT_EQ(BigString("foo", n), Read()); ASSERT_EQ("", Read()); ASSERT_EQ("bar", Read()); ASSERT_EQ("EOF", Read()); } TEST(LogTest, MarginalTrailer2) { // Make a trailer that is exactly the same length as an empty record. const int n = kBlockSize - 2 * kHeaderSize; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes()); Write("bar"); ASSERT_EQ(BigString("foo", n), Read()); ASSERT_EQ("bar", Read()); ASSERT_EQ("EOF", Read()); ASSERT_EQ(0, DroppedBytes()); ASSERT_EQ("", ReportMessage()); } TEST(LogTest, ShortTrailer) { const int n = kBlockSize - 2 * kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); Write(""); Write("bar"); ASSERT_EQ(BigString("foo", n), Read()); ASSERT_EQ("", Read()); ASSERT_EQ("bar", Read()); ASSERT_EQ("EOF", Read()); } TEST(LogTest, AlignedEof) { const int n = kBlockSize - 2 * kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); ASSERT_EQ(BigString("foo", n), Read()); ASSERT_EQ("EOF", Read()); } TEST(LogTest, OpenForAppend) { Write("hello"); ReopenForAppend(); Write("world"); ASSERT_EQ("hello", Read()); ASSERT_EQ("world", Read()); ASSERT_EQ("EOF", Read()); } TEST(LogTest, RandomRead) { const int N = 500; Random write_rnd(301); for (int i = 0; i < N; i++) { Write(RandomSkewedString(i, &write_rnd)); } Random read_rnd(301); for (int i = 0; i < N; i++) { ASSERT_EQ(RandomSkewedString(i, &read_rnd), Read()); } ASSERT_EQ("EOF", Read()); } // Tests of all the error paths in log_reader.cc follow: TEST(LogTest, ReadError) { Write("foo"); ForceError(); ASSERT_EQ("EOF", Read()); ASSERT_EQ(kBlockSize, DroppedBytes()); ASSERT_EQ("OK", MatchError("read error")); } TEST(LogTest, BadRecordType) { Write("foo"); // Type is stored in header[6] IncrementByte(6, 100); FixChecksum(0, 3); ASSERT_EQ("EOF", Read()); ASSERT_EQ(3, DroppedBytes()); ASSERT_EQ("OK", MatchError("unknown record type")); } TEST(LogTest, TruncatedTrailingRecordIsIgnored) { Write("foo"); ShrinkSize(4); // Drop all payload as well as a header byte ASSERT_EQ("EOF", Read()); // Truncated last record is ignored, not treated as an error. ASSERT_EQ(0, DroppedBytes()); ASSERT_EQ("", ReportMessage()); } TEST(LogTest, BadLength) { const int kPayloadSize = kBlockSize - kHeaderSize; Write(BigString("bar", kPayloadSize)); Write("foo"); // Least significant size byte is stored in header[4]. 
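// (Record header layout: bytes 0-3 hold the CRC, bytes 4-5 the little-endian
// payload length, and byte 6 the record type, so bumping header[4] corrupts
// the length field.)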
IncrementByte(4, 1); ASSERT_EQ("foo", Read()); ASSERT_EQ(kBlockSize, DroppedBytes()); ASSERT_EQ("OK", MatchError("bad record length")); } TEST(LogTest, BadLengthAtEndIsIgnored) { Write("foo"); ShrinkSize(1); ASSERT_EQ("EOF", Read()); ASSERT_EQ(0, DroppedBytes()); ASSERT_EQ("", ReportMessage()); } TEST(LogTest, ChecksumMismatch) { Write("foo"); IncrementByte(0, 10); ASSERT_EQ("EOF", Read()); ASSERT_EQ(10, DroppedBytes()); ASSERT_EQ("OK", MatchError("checksum mismatch")); } TEST(LogTest, UnexpectedMiddleType) { Write("foo"); SetByte(6, kMiddleType); FixChecksum(0, 3); ASSERT_EQ("EOF", Read()); ASSERT_EQ(3, DroppedBytes()); ASSERT_EQ("OK", MatchError("missing start")); } TEST(LogTest, UnexpectedLastType) { Write("foo"); SetByte(6, kLastType); FixChecksum(0, 3); ASSERT_EQ("EOF", Read()); ASSERT_EQ(3, DroppedBytes()); ASSERT_EQ("OK", MatchError("missing start")); } TEST(LogTest, UnexpectedFullType) { Write("foo"); Write("bar"); SetByte(6, kFirstType); FixChecksum(0, 3); ASSERT_EQ("bar", Read()); ASSERT_EQ("EOF", Read()); ASSERT_EQ(3, DroppedBytes()); ASSERT_EQ("OK", MatchError("partial record without end")); } TEST(LogTest, UnexpectedFirstType) { Write("foo"); Write(BigString("bar", 100000)); SetByte(6, kFirstType); FixChecksum(0, 3); ASSERT_EQ(BigString("bar", 100000), Read()); ASSERT_EQ("EOF", Read()); ASSERT_EQ(3, DroppedBytes()); ASSERT_EQ("OK", MatchError("partial record without end")); } TEST(LogTest, MissingLastIsIgnored) { Write(BigString("bar", kBlockSize)); // Remove the LAST block, including header. ShrinkSize(14); ASSERT_EQ("EOF", Read()); ASSERT_EQ("", ReportMessage()); ASSERT_EQ(0, DroppedBytes()); } TEST(LogTest, PartialLastIsIgnored) { Write(BigString("bar", kBlockSize)); // Cause a bad record length in the LAST block. ShrinkSize(1); ASSERT_EQ("EOF", Read()); ASSERT_EQ("", ReportMessage()); ASSERT_EQ(0, DroppedBytes()); } TEST(LogTest, SkipIntoMultiRecord) { // Consider a fragmented record: // first(R1), middle(R1), last(R1), first(R2) // If initial_offset points to a record after first(R1) but before first(R2) // incomplete fragment errors are not actual errors, and must be suppressed // until a new first or full record is encountered. Write(BigString("foo", 3 * kBlockSize)); Write("correct"); StartReadingAt(kBlockSize); ASSERT_EQ("correct", Read()); ASSERT_EQ("", ReportMessage()); ASSERT_EQ(0, DroppedBytes()); ASSERT_EQ("EOF", Read()); } TEST(LogTest, ErrorJoinsRecords) { // Consider two fragmented records: // first(R1) last(R1) first(R2) last(R2) // where the middle two fragments disappear. We do not want // first(R1),last(R2) to get joined and returned as a valid record. 
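// The reader must instead report the wiped middle fragments as corruption
// (counted in DroppedBytes) and resynchronize at the next block boundary.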
// Write records that span two blocks Write(BigString("foo", kBlockSize)); Write(BigString("bar", kBlockSize)); Write("correct"); // Wipe the middle block for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) { SetByte(offset, 'x'); } ASSERT_EQ("correct", Read()); ASSERT_EQ("EOF", Read()); const size_t dropped = DroppedBytes(); ASSERT_LE(dropped, 2 * kBlockSize + 100); ASSERT_GE(dropped, 2 * kBlockSize); } TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); } TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); } TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); } TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); } TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); } TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); } TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); } TEST(LogTest, ReadFourthFirstBlockTrailer) { CheckInitialOffsetRecord(log::kBlockSize - 4, 3); } TEST(LogTest, ReadFourthMiddleBlock) { CheckInitialOffsetRecord(log::kBlockSize + 1, 3); } TEST(LogTest, ReadFourthLastBlock) { CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3); } TEST(LogTest, ReadFourthStart) { CheckInitialOffsetRecord( 2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, 3); } TEST(LogTest, ReadInitialOffsetIntoBlockPadding) { CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5); } TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); } TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } } // namespace log } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
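// For context, a minimal sketch of driving the log Reader outside this
// harness; `src` here stands for any SequentialFile positioned at the start
// of a log (the name is illustrative, not part of the test suite):
//
//   leveldb::log::Reader reader(&src, /*reporter=*/nullptr,
//                               /*checksum=*/true, /*initial_offset=*/0);
//   leveldb::Slice record;
//   std::string scratch;
//   while (reader.ReadRecord(&record, &scratch)) {
//     // Each iteration yields one complete logical record, reassembled
//     // from its first/middle/last fragments.
//   }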
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/version_set_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/version_set.h" #include "util/logging.h" #include "util/testharness.h" #include "util/testutil.h" namespace leveldb { class FindFileTest { public: FindFileTest() : disjoint_sorted_files_(true) {} ~FindFileTest() { for (int i = 0; i < files_.size(); i++) { delete files_[i]; } } void Add(const char* smallest, const char* largest, SequenceNumber smallest_seq = 100, SequenceNumber largest_seq = 100) { FileMetaData* f = new FileMetaData; f->number = files_.size() + 1; f->smallest = InternalKey(smallest, smallest_seq, kTypeValue); f->largest = InternalKey(largest, largest_seq, kTypeValue); files_.push_back(f); } int Find(const char* key) { InternalKey target(key, 100, kTypeValue); InternalKeyComparator cmp(BytewiseComparator()); return FindFile(cmp, files_, target.Encode()); } bool Overlaps(const char* smallest, const char* largest) { InternalKeyComparator cmp(BytewiseComparator()); Slice s(smallest != nullptr ? smallest : ""); Slice l(largest != nullptr ? largest : ""); return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_, (smallest != nullptr ? &s : nullptr), (largest != nullptr ? &l : nullptr)); } bool disjoint_sorted_files_; private: std::vector<FileMetaData*> files_; }; TEST(FindFileTest, Empty) { ASSERT_EQ(0, Find("foo")); ASSERT_TRUE(!Overlaps("a", "z")); ASSERT_TRUE(!Overlaps(nullptr, "z")); ASSERT_TRUE(!Overlaps("a", nullptr)); ASSERT_TRUE(!Overlaps(nullptr, nullptr)); } TEST(FindFileTest, Single) { Add("p", "q"); ASSERT_EQ(0, Find("a")); ASSERT_EQ(0, Find("p")); ASSERT_EQ(0, Find("p1")); ASSERT_EQ(0, Find("q")); ASSERT_EQ(1, Find("q1")); ASSERT_EQ(1, Find("z")); ASSERT_TRUE(!Overlaps("a", "b")); ASSERT_TRUE(!Overlaps("z1", "z2")); ASSERT_TRUE(Overlaps("a", "p")); ASSERT_TRUE(Overlaps("a", "q")); ASSERT_TRUE(Overlaps("a", "z")); ASSERT_TRUE(Overlaps("p", "p1")); ASSERT_TRUE(Overlaps("p", "q")); ASSERT_TRUE(Overlaps("p", "z")); ASSERT_TRUE(Overlaps("p1", "p2")); ASSERT_TRUE(Overlaps("p1", "z")); ASSERT_TRUE(Overlaps("q", "q")); ASSERT_TRUE(Overlaps("q", "q1")); ASSERT_TRUE(!Overlaps(nullptr, "j")); ASSERT_TRUE(!Overlaps("r", nullptr)); ASSERT_TRUE(Overlaps(nullptr, "p")); ASSERT_TRUE(Overlaps(nullptr, "p1")); ASSERT_TRUE(Overlaps("q", nullptr)); ASSERT_TRUE(Overlaps(nullptr, nullptr)); } TEST(FindFileTest, Multiple) { Add("150", "200"); Add("200", "250"); Add("300", "350"); Add("400", "450"); ASSERT_EQ(0, Find("100")); ASSERT_EQ(0, Find("150")); ASSERT_EQ(0, Find("151")); ASSERT_EQ(0, Find("199")); ASSERT_EQ(0, Find("200")); ASSERT_EQ(1, Find("201")); ASSERT_EQ(1, Find("249")); ASSERT_EQ(1, Find("250")); ASSERT_EQ(2, Find("251")); ASSERT_EQ(2, Find("299")); ASSERT_EQ(2, Find("300")); ASSERT_EQ(2, Find("349")); ASSERT_EQ(2, Find("350")); ASSERT_EQ(3, Find("351")); ASSERT_EQ(3, Find("400")); ASSERT_EQ(3, Find("450")); ASSERT_EQ(4, Find("451")); ASSERT_TRUE(!Overlaps("100", "149")); ASSERT_TRUE(!Overlaps("251", "299")); ASSERT_TRUE(!Overlaps("451", "500")); ASSERT_TRUE(!Overlaps("351", "399")); ASSERT_TRUE(Overlaps("100", "150")); ASSERT_TRUE(Overlaps("100", "200")); ASSERT_TRUE(Overlaps("100", "300")); ASSERT_TRUE(Overlaps("100", "400")); ASSERT_TRUE(Overlaps("100", "500")); ASSERT_TRUE(Overlaps("375", "400")); ASSERT_TRUE(Overlaps("450", "450")); ASSERT_TRUE(Overlaps("450", "500")); } TEST(FindFileTest, MultipleNullBoundaries) { Add("150", "200"); 
Add("200", "250"); Add("300", "350"); Add("400", "450"); ASSERT_TRUE(!Overlaps(nullptr, "149")); ASSERT_TRUE(!Overlaps("451", nullptr)); ASSERT_TRUE(Overlaps(nullptr, nullptr)); ASSERT_TRUE(Overlaps(nullptr, "150")); ASSERT_TRUE(Overlaps(nullptr, "199")); ASSERT_TRUE(Overlaps(nullptr, "200")); ASSERT_TRUE(Overlaps(nullptr, "201")); ASSERT_TRUE(Overlaps(nullptr, "400")); ASSERT_TRUE(Overlaps(nullptr, "800")); ASSERT_TRUE(Overlaps("100", nullptr)); ASSERT_TRUE(Overlaps("200", nullptr)); ASSERT_TRUE(Overlaps("449", nullptr)); ASSERT_TRUE(Overlaps("450", nullptr)); } TEST(FindFileTest, OverlapSequenceChecks) { Add("200", "200", 5000, 3000); ASSERT_TRUE(!Overlaps("199", "199")); ASSERT_TRUE(!Overlaps("201", "300")); ASSERT_TRUE(Overlaps("200", "200")); ASSERT_TRUE(Overlaps("190", "200")); ASSERT_TRUE(Overlaps("200", "210")); } TEST(FindFileTest, OverlappingFiles) { Add("150", "600"); Add("400", "500"); disjoint_sorted_files_ = false; ASSERT_TRUE(!Overlaps("100", "149")); ASSERT_TRUE(!Overlaps("601", "700")); ASSERT_TRUE(Overlaps("100", "150")); ASSERT_TRUE(Overlaps("100", "200")); ASSERT_TRUE(Overlaps("100", "300")); ASSERT_TRUE(Overlaps("100", "400")); ASSERT_TRUE(Overlaps("100", "500")); ASSERT_TRUE(Overlaps("375", "400")); ASSERT_TRUE(Overlaps("450", "450")); ASSERT_TRUE(Overlaps("450", "500")); ASSERT_TRUE(Overlaps("450", "700")); ASSERT_TRUE(Overlaps("600", "700")); } void AddBoundaryInputs(const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& level_files, std::vector<FileMetaData*>* compaction_files); class AddBoundaryInputsTest { public: std::vector<FileMetaData*> level_files_; std::vector<FileMetaData*> compaction_files_; std::vector<FileMetaData*> all_files_; InternalKeyComparator icmp_; AddBoundaryInputsTest() : icmp_(BytewiseComparator()) {} ~AddBoundaryInputsTest() { for (size_t i = 0; i < all_files_.size(); ++i) { delete all_files_[i]; } all_files_.clear(); } FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest, InternalKey largest) { FileMetaData* f = new FileMetaData(); f->number = number; f->smallest = smallest; f->largest = largest; all_files_.push_back(f); return f; } }; TEST(AddBoundaryInputsTest, TestEmptyFileSets) { AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_TRUE(compaction_files_.empty()); ASSERT_TRUE(level_files_.empty()); } TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("100", 1, kTypeValue))); compaction_files_.push_back(f1); AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_EQ(1, compaction_files_.size()); ASSERT_EQ(f1, compaction_files_[0]); ASSERT_TRUE(level_files_.empty()); } TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("100", 1, kTypeValue))); level_files_.push_back(f1); AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_TRUE(compaction_files_.empty()); ASSERT_EQ(1, level_files_.size()); ASSERT_EQ(f1, level_files_[0]); } TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("100", 1, kTypeValue))); FileMetaData* f2 = CreateFileMetaData(1, InternalKey("200", 2, kTypeValue), InternalKey(InternalKey("200", 1, kTypeValue))); FileMetaData* f3 = CreateFileMetaData(1, InternalKey("300", 2, kTypeValue), InternalKey(InternalKey("300", 1, kTypeValue))); level_files_.push_back(f3); 
level_files_.push_back(f2); level_files_.push_back(f1); compaction_files_.push_back(f2); compaction_files_.push_back(f3); AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_EQ(2, compaction_files_.size()); } TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 3, kTypeValue), InternalKey(InternalKey("100", 2, kTypeValue))); FileMetaData* f2 = CreateFileMetaData(1, InternalKey("100", 1, kTypeValue), InternalKey(InternalKey("200", 3, kTypeValue))); FileMetaData* f3 = CreateFileMetaData(1, InternalKey("300", 2, kTypeValue), InternalKey(InternalKey("300", 1, kTypeValue))); level_files_.push_back(f3); level_files_.push_back(f2); level_files_.push_back(f1); compaction_files_.push_back(f1); AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_EQ(2, compaction_files_.size()); ASSERT_EQ(f1, compaction_files_[0]); ASSERT_EQ(f2, compaction_files_[1]); } TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue))); FileMetaData* f2 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("300", 1, kTypeValue))); FileMetaData* f3 = CreateFileMetaData(1, InternalKey("100", 4, kTypeValue), InternalKey(InternalKey("100", 3, kTypeValue))); level_files_.push_back(f2); level_files_.push_back(f3); level_files_.push_back(f1); compaction_files_.push_back(f1); AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_EQ(3, compaction_files_.size()); ASSERT_EQ(f1, compaction_files_[0]); ASSERT_EQ(f3, compaction_files_[1]); ASSERT_EQ(f2, compaction_files_[2]); } TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue))); FileMetaData* f2 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue))); FileMetaData* f3 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("300", 1, kTypeValue))); FileMetaData* f4 = CreateFileMetaData(1, InternalKey("100", 4, kTypeValue), InternalKey(InternalKey("100", 3, kTypeValue))); level_files_.push_back(f2); level_files_.push_back(f3); level_files_.push_back(f4); compaction_files_.push_back(f1); AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_EQ(3, compaction_files_.size()); ASSERT_EQ(f1, compaction_files_[0]); ASSERT_EQ(f4, compaction_files_[1]); ASSERT_EQ(f3, compaction_files_[2]); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
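// Note on the AddBoundaryInputs tests above: a boundary file is one whose
// smallest internal key shares its user key with the largest internal key of
// a file already selected for compaction (the two entries differ only by
// sequence number). If such a file were left behind, a Get() could find its
// older entry in the lower level after the newer one was compacted away, so
// AddBoundaryInputs keeps extending compaction_files_ until no boundary
// file remains.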
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/db_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/db.h" #include <atomic> #include <string> #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" #include "db/write_batch_internal.h" #include "leveldb/cache.h" #include "leveldb/env.h" #include "leveldb/filter_policy.h" #include "leveldb/table.h" #include "port/port.h" #include "port/thread_annotations.h" #include "util/hash.h" #include "util/logging.h" #include "util/mutexlock.h" #include "util/testharness.h" #include "util/testutil.h" namespace leveldb { static std::string RandomString(Random* rnd, int len) { std::string r; test::RandomString(rnd, len, &r); return r; } static std::string RandomKey(Random* rnd) { int len = (rnd->OneIn(3) ? 1 // Short sometimes to encourage collisions : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10))); return test::RandomKey(rnd, len); } namespace { class AtomicCounter { public: AtomicCounter() : count_(0) {} void Increment() { IncrementBy(1); } void IncrementBy(int count) LOCKS_EXCLUDED(mu_) { MutexLock l(&mu_); count_ += count; } int Read() LOCKS_EXCLUDED(mu_) { MutexLock l(&mu_); return count_; } void Reset() LOCKS_EXCLUDED(mu_) { MutexLock l(&mu_); count_ = 0; } private: port::Mutex mu_; int count_ GUARDED_BY(mu_); }; void DelayMilliseconds(int millis) { Env::Default()->SleepForMicroseconds(millis * 1000); } } // namespace // Test Env to override default Env behavior for testing. class TestEnv : public EnvWrapper { public: explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {} void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; } Status GetChildren(const std::string& dir, std::vector<std::string>* result) override { Status s = target()->GetChildren(dir, result); if (!s.ok() || !ignore_dot_files_) { return s; } std::vector<std::string>::iterator it = result->begin(); while (it != result->end()) { if ((*it == ".") || (*it == "..")) { it = result->erase(it); } else { ++it; } } return s; } private: bool ignore_dot_files_; }; // Special Env used to delay background operations. class SpecialEnv : public EnvWrapper { public: // sstable/log Sync() calls are blocked while this pointer is non-null. std::atomic<bool> delay_data_sync_; // sstable/log Sync() calls return an error. std::atomic<bool> data_sync_error_; // Simulate no-space errors while this pointer is non-null. std::atomic<bool> no_space_; // Simulate non-writable file system while this pointer is non-null. std::atomic<bool> non_writable_; // Force sync of manifest files to fail while this pointer is non-null. std::atomic<bool> manifest_sync_error_; // Force write to manifest files to fail while this pointer is non-null. 
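// (The "pointer is non-null" wording above predates the port of these
// controls to std::atomic<bool>; read it as "flag is true".)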
std::atomic<bool> manifest_write_error_; bool count_random_reads_; AtomicCounter random_read_counter_; explicit SpecialEnv(Env* base) : EnvWrapper(base), delay_data_sync_(false), data_sync_error_(false), no_space_(false), non_writable_(false), manifest_sync_error_(false), manifest_write_error_(false), count_random_reads_(false) {} Status NewWritableFile(const std::string& f, WritableFile** r) { class DataFile : public WritableFile { private: SpecialEnv* const env_; WritableFile* const base_; public: DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {} ~DataFile() { delete base_; } Status Append(const Slice& data) { if (env_->no_space_.load(std::memory_order_acquire)) { // Drop writes on the floor return Status::OK(); } else { return base_->Append(data); } } Status Close() { return base_->Close(); } Status Flush() { return base_->Flush(); } Status Sync() { if (env_->data_sync_error_.load(std::memory_order_acquire)) { return Status::IOError("simulated data sync error"); } while (env_->delay_data_sync_.load(std::memory_order_acquire)) { DelayMilliseconds(100); } return base_->Sync(); } std::string GetName() const override { return ""; } }; class ManifestFile : public WritableFile { private: SpecialEnv* env_; WritableFile* base_; public: ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {} ~ManifestFile() { delete base_; } Status Append(const Slice& data) { if (env_->manifest_write_error_.load(std::memory_order_acquire)) { return Status::IOError("simulated writer error"); } else { return base_->Append(data); } } Status Close() { return base_->Close(); } Status Flush() { return base_->Flush(); } Status Sync() { if (env_->manifest_sync_error_.load(std::memory_order_acquire)) { return Status::IOError("simulated sync error"); } else { return base_->Sync(); } } std::string GetName() const override { return ""; } }; if (non_writable_.load(std::memory_order_acquire)) { return Status::IOError("simulated write error"); } Status s = target()->NewWritableFile(f, r); if (s.ok()) { if (strstr(f.c_str(), ".ldb") != nullptr || strstr(f.c_str(), ".log") != nullptr) { *r = new DataFile(this, *r); } else if (strstr(f.c_str(), "MANIFEST") != nullptr) { *r = new ManifestFile(this, *r); } } return s; } Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) { class CountingFile : public RandomAccessFile { private: RandomAccessFile* target_; AtomicCounter* counter_; public: CountingFile(RandomAccessFile* target, AtomicCounter* counter) : target_(target), counter_(counter) {} ~CountingFile() override { delete target_; } Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const override { counter_->Increment(); return target_->Read(offset, n, result, scratch); } std::string GetName() const override { return ""; } }; Status s = target()->NewRandomAccessFile(f, r); if (s.ok() && count_random_reads_) { *r = new CountingFile(*r, &random_read_counter_); } return s; } }; class DBTest { public: std::string dbname_; SpecialEnv* env_; DB* db_; Options last_options_; DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) { filter_policy_ = NewBloomFilterPolicy(10); dbname_ = test::TmpDir() + "/db_test"; DestroyDB(dbname_, Options()); db_ = nullptr; Reopen(); } ~DBTest() { delete db_; DestroyDB(dbname_, Options()); delete env_; delete filter_policy_; } // Switch to a fresh database with the next option configuration to // test. Return false if there are no more configurations to test. 
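// Most tests wrap their body in do { ... } while (ChangeOptions()) so that
// every assertion runs once per configuration (default, reused logs, bloom
// filter, no compression).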
bool ChangeOptions() { option_config_++; if (option_config_ >= kEnd) { return false; } else { DestroyAndReopen(); return true; } } // Return the current option configuration. Options CurrentOptions() { Options options; options.reuse_logs = false; switch (option_config_) { case kReuse: options.reuse_logs = true; break; case kFilter: options.filter_policy = filter_policy_; break; case kUncompressed: options.compression = kNoCompression; break; default: break; } return options; } DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); } void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); } void Close() { delete db_; db_ = nullptr; } void DestroyAndReopen(Options* options = nullptr) { delete db_; db_ = nullptr; DestroyDB(dbname_, Options()); ASSERT_OK(TryReopen(options)); } Status TryReopen(Options* options) { delete db_; db_ = nullptr; Options opts; if (options != nullptr) { opts = *options; } else { opts = CurrentOptions(); opts.create_if_missing = true; } last_options_ = opts; return DB::Open(opts, dbname_, &db_); } Status Put(const std::string& k, const std::string& v) { return db_->Put(WriteOptions(), k, v); } Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); } std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { ReadOptions options; options.snapshot = snapshot; std::string result; Status s = db_->Get(options, k, &result); if (s.IsNotFound()) { result = "NOT_FOUND"; } else if (!s.ok()) { result = s.ToString(); } return result; } // Return a string that contains all key,value pairs in order, // formatted like "(k1->v1)(k2->v2)". std::string Contents() { std::vector<std::string> forward; std::string result; Iterator* iter = db_->NewIterator(ReadOptions()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { std::string s = IterStatus(iter); result.push_back('('); result.append(s); result.push_back(')'); forward.push_back(s); } // Check reverse iteration results are the reverse of forward results size_t matched = 0; for (iter->SeekToLast(); iter->Valid(); iter->Prev()) { ASSERT_LT(matched, forward.size()); ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]); matched++; } ASSERT_EQ(matched, forward.size()); delete iter; return result; } std::string AllEntriesFor(const Slice& user_key) { Iterator* iter = dbfull()->TEST_NewInternalIterator(); InternalKey target(user_key, kMaxSequenceNumber, kTypeValue); iter->Seek(target.Encode()); std::string result; if (!iter->status().ok()) { result = iter->status().ToString(); } else { result = "[ "; bool first = true; while (iter->Valid()) { ParsedInternalKey ikey; if (!ParseInternalKey(iter->key(), &ikey)) { result += "CORRUPTED"; } else { if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) { break; } if (!first) { result += ", "; } first = false; switch (ikey.type) { case kTypeValue: result += iter->value().ToString(); break; case kTypeDeletion: result += "DEL"; break; } } iter->Next(); } if (!first) { result += " "; } result += "]"; } delete iter; return result; } int NumTableFilesAtLevel(int level) { std::string property; ASSERT_TRUE(db_->GetProperty( "leveldb.num-files-at-level" + NumberToString(level), &property)); return std::stoi(property); } int TotalTableFiles() { int result = 0; for (int level = 0; level < config::kNumLevels; level++) { result += NumTableFilesAtLevel(level); } return result; } // Return spread of files per level std::string FilesPerLevel() { std::string result; int last_non_zero_offset = 0; for (int level = 0; level < 
config::kNumLevels; level++) { int f = NumTableFilesAtLevel(level); char buf[100]; snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f); result += buf; if (f > 0) { last_non_zero_offset = result.size(); } } result.resize(last_non_zero_offset); return result; } int CountFiles() { std::vector<std::string> files; env_->GetChildren(dbname_, &files); return static_cast<int>(files.size()); } uint64_t Size(const Slice& start, const Slice& limit) { Range r(start, limit); uint64_t size; db_->GetApproximateSizes(&r, 1, &size); return size; } void Compact(const Slice& start, const Slice& limit) { db_->CompactRange(&start, &limit); } // Do n memtable compactions, each of which produces an sstable // covering the range [small_key,large_key]. void MakeTables(int n, const std::string& small_key, const std::string& large_key) { for (int i = 0; i < n; i++) { Put(small_key, "begin"); Put(large_key, "end"); dbfull()->TEST_CompactMemTable(); } } // Prevent pushing of new sstables into deeper levels by adding // tables that cover a specified range to all levels. void FillLevels(const std::string& smallest, const std::string& largest) { MakeTables(config::kNumLevels, smallest, largest); } void DumpFileCounts(const char* label) { fprintf(stderr, "---\n%s:\n", label); fprintf( stderr, "maxoverlap: %lld\n", static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes())); for (int level = 0; level < config::kNumLevels; level++) { int num = NumTableFilesAtLevel(level); if (num > 0) { fprintf(stderr, " level %3d : %d files\n", level, num); } } } std::string DumpSSTableList() { std::string property; db_->GetProperty("leveldb.sstables", &property); return property; } std::string IterStatus(Iterator* iter) { std::string result; if (iter->Valid()) { result = iter->key().ToString() + "->" + iter->value().ToString(); } else { result = "(invalid)"; } return result; } bool DeleteAnSSTFile() { std::vector<std::string> filenames; ASSERT_OK(env_->GetChildren(dbname_, &filenames)); uint64_t number; FileType type; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) { ASSERT_OK(env_->DeleteFile(TableFileName(dbname_, number))); return true; } } return false; } // Returns number of files renamed. 
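// (leveldb originally named table files *.sst and later switched to *.ldb;
// renaming back to the legacy extension lets tests confirm that both
// extensions remain readable.)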
int RenameLDBToSST() { std::vector<std::string> filenames; ASSERT_OK(env_->GetChildren(dbname_, &filenames)); uint64_t number; FileType type; int files_renamed = 0; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) { const std::string from = TableFileName(dbname_, number); const std::string to = SSTTableFileName(dbname_, number); ASSERT_OK(env_->RenameFile(from, to)); files_renamed++; } } return files_renamed; } private: // Sequence of option configurations to try enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd }; const FilterPolicy* filter_policy_; int option_config_; }; TEST(DBTest, Empty) { do { ASSERT_TRUE(db_ != nullptr); ASSERT_EQ("NOT_FOUND", Get("foo")); } while (ChangeOptions()); } TEST(DBTest, EmptyKey) { do { ASSERT_OK(Put("", "v1")); ASSERT_EQ("v1", Get("")); ASSERT_OK(Put("", "v2")); ASSERT_EQ("v2", Get("")); } while (ChangeOptions()); } TEST(DBTest, EmptyValue) { do { ASSERT_OK(Put("key", "v1")); ASSERT_EQ("v1", Get("key")); ASSERT_OK(Put("key", "")); ASSERT_EQ("", Get("key")); ASSERT_OK(Put("key", "v2")); ASSERT_EQ("v2", Get("key")); } while (ChangeOptions()); } TEST(DBTest, ReadWrite) { do { ASSERT_OK(Put("foo", "v1")); ASSERT_EQ("v1", Get("foo")); ASSERT_OK(Put("bar", "v2")); ASSERT_OK(Put("foo", "v3")); ASSERT_EQ("v3", Get("foo")); ASSERT_EQ("v2", Get("bar")); } while (ChangeOptions()); } TEST(DBTest, PutDeleteGet) { do { ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); ASSERT_EQ("v1", Get("foo")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); ASSERT_EQ("v2", Get("foo")); ASSERT_OK(db_->Delete(WriteOptions(), "foo")); ASSERT_EQ("NOT_FOUND", Get("foo")); } while (ChangeOptions()); } TEST(DBTest, GetFromImmutableLayer) { do { Options options = CurrentOptions(); options.env = env_; options.write_buffer_size = 100000; // Small write buffer Reopen(&options); ASSERT_OK(Put("foo", "v1")); ASSERT_EQ("v1", Get("foo")); // Block sync calls. env_->delay_data_sync_.store(true, std::memory_order_release); Put("k1", std::string(100000, 'x')); // Fill memtable. Put("k2", std::string(100000, 'y')); // Trigger compaction. ASSERT_EQ("v1", Get("foo")); // Release sync calls. env_->delay_data_sync_.store(false, std::memory_order_release); } while (ChangeOptions()); } TEST(DBTest, GetFromVersions) { do { ASSERT_OK(Put("foo", "v1")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v1", Get("foo")); } while (ChangeOptions()); } TEST(DBTest, GetMemUsage) { do { ASSERT_OK(Put("foo", "v1")); std::string val; ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val)); int mem_usage = std::stoi(val); ASSERT_GT(mem_usage, 0); ASSERT_LT(mem_usage, 5 * 1024 * 1024); } while (ChangeOptions()); } TEST(DBTest, GetSnapshot) { do { // Try with both a short key and a long key for (int i = 0; i < 2; i++) { std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x'); ASSERT_OK(Put(key, "v1")); const Snapshot* s1 = db_->GetSnapshot(); ASSERT_OK(Put(key, "v2")); ASSERT_EQ("v2", Get(key)); ASSERT_EQ("v1", Get(key, s1)); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v2", Get(key)); ASSERT_EQ("v1", Get(key, s1)); db_->ReleaseSnapshot(s1); } } while (ChangeOptions()); } TEST(DBTest, GetIdenticalSnapshots) { do { // Try with both a short key and a long key for (int i = 0; i < 2; i++) { std::string key = (i == 0) ? 
std::string("foo") : std::string(200, 'x'); ASSERT_OK(Put(key, "v1")); const Snapshot* s1 = db_->GetSnapshot(); const Snapshot* s2 = db_->GetSnapshot(); const Snapshot* s3 = db_->GetSnapshot(); ASSERT_OK(Put(key, "v2")); ASSERT_EQ("v2", Get(key)); ASSERT_EQ("v1", Get(key, s1)); ASSERT_EQ("v1", Get(key, s2)); ASSERT_EQ("v1", Get(key, s3)); db_->ReleaseSnapshot(s1); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v2", Get(key)); ASSERT_EQ("v1", Get(key, s2)); db_->ReleaseSnapshot(s2); ASSERT_EQ("v1", Get(key, s3)); db_->ReleaseSnapshot(s3); } } while (ChangeOptions()); } TEST(DBTest, IterateOverEmptySnapshot) { do { const Snapshot* snapshot = db_->GetSnapshot(); ReadOptions read_options; read_options.snapshot = snapshot; ASSERT_OK(Put("foo", "v1")); ASSERT_OK(Put("foo", "v2")); Iterator* iterator1 = db_->NewIterator(read_options); iterator1->SeekToFirst(); ASSERT_TRUE(!iterator1->Valid()); delete iterator1; dbfull()->TEST_CompactMemTable(); Iterator* iterator2 = db_->NewIterator(read_options); iterator2->SeekToFirst(); ASSERT_TRUE(!iterator2->Valid()); delete iterator2; db_->ReleaseSnapshot(snapshot); } while (ChangeOptions()); } TEST(DBTest, GetLevel0Ordering) { do { // Check that we process level-0 files in correct order. The code // below generates two level-0 files where the earlier one comes // before the later one in the level-0 file list since the earlier // one has a smaller "smallest" key. ASSERT_OK(Put("bar", "b")); ASSERT_OK(Put("foo", "v1")); dbfull()->TEST_CompactMemTable(); ASSERT_OK(Put("foo", "v2")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v2", Get("foo")); } while (ChangeOptions()); } TEST(DBTest, GetOrderedByLevels) { do { ASSERT_OK(Put("foo", "v1")); Compact("a", "z"); ASSERT_EQ("v1", Get("foo")); ASSERT_OK(Put("foo", "v2")); ASSERT_EQ("v2", Get("foo")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v2", Get("foo")); } while (ChangeOptions()); } TEST(DBTest, GetPicksCorrectFile) { do { // Arrange to have multiple files in a non-level-0 level. ASSERT_OK(Put("a", "va")); Compact("a", "b"); ASSERT_OK(Put("x", "vx")); Compact("x", "y"); ASSERT_OK(Put("f", "vf")); Compact("f", "g"); ASSERT_EQ("va", Get("a")); ASSERT_EQ("vf", Get("f")); ASSERT_EQ("vx", Get("x")); } while (ChangeOptions()); } TEST(DBTest, GetEncountersEmptyLevel) { do { // Arrange for the following to happen: // * sstable A in level 0 // * nothing in level 1 // * sstable B in level 2 // Then do enough Get() calls to arrange for an automatic compaction // of sstable A. A bug would cause the compaction to be marked as // occurring at level 1 (instead of the correct level 0). // Step 1: First place sstables in levels 0 and 2 int compaction_count = 0; while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) { ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2"; compaction_count++; Put("a", "begin"); Put("z", "end"); dbfull()->TEST_CompactMemTable(); } // Step 2: clear level 1 if necessary. 
dbfull()->TEST_CompactRange(1, nullptr, nullptr); ASSERT_EQ(NumTableFilesAtLevel(0), 1); ASSERT_EQ(NumTableFilesAtLevel(1), 0); ASSERT_EQ(NumTableFilesAtLevel(2), 1); // Step 3: read a bunch of times for (int i = 0; i < 1000; i++) { ASSERT_EQ("NOT_FOUND", Get("missing")); } // Step 4: Wait for compaction to finish DelayMilliseconds(1000); ASSERT_EQ(NumTableFilesAtLevel(0), 0); } while (ChangeOptions()); } TEST(DBTest, IterEmpty) { Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->Seek("foo"); ASSERT_EQ(IterStatus(iter), "(invalid)"); delete iter; } TEST(DBTest, IterSingle) { ASSERT_OK(Put("a", "va")); Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->Seek(""); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->Seek("a"); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->Seek("b"); ASSERT_EQ(IterStatus(iter), "(invalid)"); delete iter; } TEST(DBTest, IterMulti) { ASSERT_OK(Put("a", "va")); ASSERT_OK(Put("b", "vb")); ASSERT_OK(Put("c", "vc")); Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); ASSERT_EQ(IterStatus(iter), "b->vb"); iter->Next(); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "b->vb"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->Seek(""); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Seek("a"); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Seek("ax"); ASSERT_EQ(IterStatus(iter), "b->vb"); iter->Seek("b"); ASSERT_EQ(IterStatus(iter), "b->vb"); iter->Seek("z"); ASSERT_EQ(IterStatus(iter), "(invalid)"); // Switch from reverse to forward iter->SeekToLast(); iter->Prev(); iter->Prev(); iter->Next(); ASSERT_EQ(IterStatus(iter), "b->vb"); // Switch from forward to reverse iter->SeekToFirst(); iter->Next(); iter->Next(); iter->Prev(); ASSERT_EQ(IterStatus(iter), "b->vb"); // Make sure iter stays at snapshot ASSERT_OK(Put("a", "va2")); ASSERT_OK(Put("a2", "va3")); ASSERT_OK(Put("b", "vb2")); ASSERT_OK(Put("c", "vc2")); ASSERT_OK(Delete("b")); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); ASSERT_EQ(IterStatus(iter), "b->vb"); iter->Next(); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "b->vb"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "(invalid)"); delete iter; } TEST(DBTest, 
IterSmallAndLargeMix) { ASSERT_OK(Put("a", "va")); ASSERT_OK(Put("b", std::string(100000, 'b'))); ASSERT_OK(Put("c", "vc")); ASSERT_OK(Put("d", std::string(100000, 'd'))); ASSERT_OK(Put("e", std::string(100000, 'e'))); Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b')); iter->Next(); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Next(); ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd')); iter->Next(); ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e')); iter->Next(); ASSERT_EQ(IterStatus(iter), "(invalid)"); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e')); iter->Prev(); ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd')); iter->Prev(); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b')); iter->Prev(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "(invalid)"); delete iter; } TEST(DBTest, IterMultiWithDelete) { do { ASSERT_OK(Put("a", "va")); ASSERT_OK(Put("b", "vb")); ASSERT_OK(Put("c", "vc")); ASSERT_OK(Delete("b")); ASSERT_EQ("NOT_FOUND", Get("b")); Iterator* iter = db_->NewIterator(ReadOptions()); iter->Seek("c"); ASSERT_EQ(IterStatus(iter), "c->vc"); iter->Prev(); ASSERT_EQ(IterStatus(iter), "a->va"); delete iter; } while (ChangeOptions()); } TEST(DBTest, Recover) { do { ASSERT_OK(Put("foo", "v1")); ASSERT_OK(Put("baz", "v5")); Reopen(); ASSERT_EQ("v1", Get("foo")); ASSERT_EQ("v1", Get("foo")); ASSERT_EQ("v5", Get("baz")); ASSERT_OK(Put("bar", "v2")); ASSERT_OK(Put("foo", "v3")); Reopen(); ASSERT_EQ("v3", Get("foo")); ASSERT_OK(Put("foo", "v4")); ASSERT_EQ("v4", Get("foo")); ASSERT_EQ("v2", Get("bar")); ASSERT_EQ("v5", Get("baz")); } while (ChangeOptions()); } TEST(DBTest, RecoveryWithEmptyLog) { do { ASSERT_OK(Put("foo", "v1")); ASSERT_OK(Put("foo", "v2")); Reopen(); Reopen(); ASSERT_OK(Put("foo", "v3")); Reopen(); ASSERT_EQ("v3", Get("foo")); } while (ChangeOptions()); } // Check that writes done during a memtable compaction are recovered // if the database is shutdown during the memtable compaction. 
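// While the immutable memtable is being written out, new updates go to a
// fresh log file, so recovery has to replay both the old and the new log.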
TEST(DBTest, RecoverDuringMemtableCompaction) { do { Options options = CurrentOptions(); options.env = env_; options.write_buffer_size = 1000000; Reopen(&options); // Trigger a long memtable compaction and reopen the database during it ASSERT_OK(Put("foo", "v1")); // Goes to 1st log file ASSERT_OK(Put("big1", std::string(10000000, 'x'))); // Fills memtable ASSERT_OK(Put("big2", std::string(1000, 'y'))); // Triggers compaction ASSERT_OK(Put("bar", "v2")); // Goes to new log file Reopen(&options); ASSERT_EQ("v1", Get("foo")); ASSERT_EQ("v2", Get("bar")); ASSERT_EQ(std::string(10000000, 'x'), Get("big1")); ASSERT_EQ(std::string(1000, 'y'), Get("big2")); } while (ChangeOptions()); } static std::string Key(int i) { char buf[100]; snprintf(buf, sizeof(buf), "key%06d", i); return std::string(buf); } TEST(DBTest, MinorCompactionsHappen) { Options options = CurrentOptions(); options.write_buffer_size = 10000; Reopen(&options); const int N = 500; int starting_num_tables = TotalTableFiles(); for (int i = 0; i < N; i++) { ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v'))); } int ending_num_tables = TotalTableFiles(); ASSERT_GT(ending_num_tables, starting_num_tables); for (int i = 0; i < N; i++) { ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i))); } Reopen(); for (int i = 0; i < N; i++) { ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i))); } } TEST(DBTest, RecoverWithLargeLog) { { Options options = CurrentOptions(); Reopen(&options); ASSERT_OK(Put("big1", std::string(200000, '1'))); ASSERT_OK(Put("big2", std::string(200000, '2'))); ASSERT_OK(Put("small3", std::string(10, '3'))); ASSERT_OK(Put("small4", std::string(10, '4'))); ASSERT_EQ(NumTableFilesAtLevel(0), 0); } // Make sure that if we re-open with a small write buffer size that // we flush table files in the middle of a large log file. Options options = CurrentOptions(); options.write_buffer_size = 100000; Reopen(&options); ASSERT_EQ(NumTableFilesAtLevel(0), 3); ASSERT_EQ(std::string(200000, '1'), Get("big1")); ASSERT_EQ(std::string(200000, '2'), Get("big2")); ASSERT_EQ(std::string(10, '3'), Get("small3")); ASSERT_EQ(std::string(10, '4'), Get("small4")); ASSERT_GT(NumTableFilesAtLevel(0), 1); } TEST(DBTest, CompactionsGenerateMultipleFiles) { Options options = CurrentOptions(); options.write_buffer_size = 100000000; // Large write buffer Reopen(&options); Random rnd(301); // Write 8MB (80 values, each 100K) ASSERT_EQ(NumTableFilesAtLevel(0), 0); std::vector<std::string> values; for (int i = 0; i < 80; i++) { values.push_back(RandomString(&rnd, 100000)); ASSERT_OK(Put(Key(i), values[i])); } // Reopening moves updates to level-0 Reopen(&options); dbfull()->TEST_CompactRange(0, nullptr, nullptr); ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_GT(NumTableFilesAtLevel(1), 1); for (int i = 0; i < 80; i++) { ASSERT_EQ(Get(Key(i)), values[i]); } } TEST(DBTest, RepeatedWritesToSameKey) { Options options = CurrentOptions(); options.env = env_; options.write_buffer_size = 100000; // Small write buffer Reopen(&options); // We must have at most one file per level except for level-0, // which may have up to kL0_StopWritesTrigger files. 
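// Repeated overwrites of one key should therefore compact away rather than
// accumulate tables, keeping the total file count bounded by kMaxFiles.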
const int kMaxFiles = config::kNumLevels + config::kL0_StopWritesTrigger; Random rnd(301); std::string value = RandomString(&rnd, 2 * options.write_buffer_size); for (int i = 0; i < 5 * kMaxFiles; i++) { Put("key", value); ASSERT_LE(TotalTableFiles(), kMaxFiles); fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles()); } } TEST(DBTest, SparseMerge) { Options options = CurrentOptions(); options.compression = kNoCompression; Reopen(&options); FillLevels("A", "Z"); // Suppose there is: // small amount of data with prefix A // large amount of data with prefix B // small amount of data with prefix C // and that recent updates have made small changes to all three prefixes. // Check that we do not do a compaction that merges all of B in one shot. const std::string value(1000, 'x'); Put("A", "va"); // Write approximately 100MB of "B" values for (int i = 0; i < 100000; i++) { char key[100]; snprintf(key, sizeof(key), "B%010d", i); Put(key, value); } Put("C", "vc"); dbfull()->TEST_CompactMemTable(); dbfull()->TEST_CompactRange(0, nullptr, nullptr); // Make sparse update Put("A", "va2"); Put("B100", "bvalue2"); Put("C", "vc2"); dbfull()->TEST_CompactMemTable(); // Compactions should not cause us to create a situation where // a file overlaps too much data at the next level. ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576); dbfull()->TEST_CompactRange(0, nullptr, nullptr); ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576); dbfull()->TEST_CompactRange(1, nullptr, nullptr); ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576); } static bool Between(uint64_t val, uint64_t low, uint64_t high) { bool result = (val >= low) && (val <= high); if (!result) { fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", (unsigned long long)(val), (unsigned long long)(low), (unsigned long long)(high)); } return result; } TEST(DBTest, ApproximateSizes) { do { Options options = CurrentOptions(); options.write_buffer_size = 100000000; // Large write buffer options.compression = kNoCompression; DestroyAndReopen(); ASSERT_TRUE(Between(Size("", "xyz"), 0, 0)); Reopen(&options); ASSERT_TRUE(Between(Size("", "xyz"), 0, 0)); // Write 8MB (80 values, each 100K) ASSERT_EQ(NumTableFilesAtLevel(0), 0); const int N = 80; static const int S1 = 100000; static const int S2 = 105000; // Allow some expansion from metadata Random rnd(301); for (int i = 0; i < N; i++) { ASSERT_OK(Put(Key(i), RandomString(&rnd, S1))); } // 0 because GetApproximateSizes() does not account for memtable space ASSERT_TRUE(Between(Size("", Key(50)), 0, 0)); if (options.reuse_logs) { // Recovery will reuse memtable, and GetApproximateSizes() does not // account for memtable usage; Reopen(&options); ASSERT_TRUE(Between(Size("", Key(50)), 0, 0)); continue; } // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { Reopen(&options); for (int compact_start = 0; compact_start < N; compact_start += 10) { for (int i = 0; i < N; i += 10) { ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i)); ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1), S2 * (i + 1))); ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10)); } ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50)); ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50)); std::string cstart_str = Key(compact_start); std::string cend_str = Key(compact_start + 9); Slice cstart = cstart_str; Slice cend = cend_str; dbfull()->TEST_CompactRange(0, &cstart, &cend); } 
ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_GT(NumTableFilesAtLevel(1), 0); } } while (ChangeOptions()); } TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { do { Options options = CurrentOptions(); options.compression = kNoCompression; Reopen(); Random rnd(301); std::string big1 = RandomString(&rnd, 100000); ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000))); ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000))); ASSERT_OK(Put(Key(2), big1)); ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000))); ASSERT_OK(Put(Key(4), big1)); ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000))); ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000))); ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000))); if (options.reuse_logs) { // Need to force a memtable compaction since recovery does not do so. ASSERT_OK(dbfull()->TEST_CompactMemTable()); } // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { Reopen(&options); ASSERT_TRUE(Between(Size("", Key(0)), 0, 0)); ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000)); ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000)); ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000)); ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000)); ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000)); ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000)); ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000)); ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000)); ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000)); dbfull()->TEST_CompactRange(0, nullptr, nullptr); } } while (ChangeOptions()); } TEST(DBTest, IteratorPinsRef) { Put("foo", "hello"); // Get iterator that will yield the current contents of the DB. Iterator* iter = db_->NewIterator(ReadOptions()); // Write to force compactions Put("foo", "newvalue1"); for (int i = 0; i < 100; i++) { ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values } Put("foo", "newvalue2"); iter->SeekToFirst(); ASSERT_TRUE(iter->Valid()); ASSERT_EQ("foo", iter->key().ToString()); ASSERT_EQ("hello", iter->value().ToString()); iter->Next(); ASSERT_TRUE(!iter->Valid()); delete iter; } TEST(DBTest, Snapshot) { do { Put("foo", "v1"); const Snapshot* s1 = db_->GetSnapshot(); Put("foo", "v2"); const Snapshot* s2 = db_->GetSnapshot(); Put("foo", "v3"); const Snapshot* s3 = db_->GetSnapshot(); Put("foo", "v4"); ASSERT_EQ("v1", Get("foo", s1)); ASSERT_EQ("v2", Get("foo", s2)); ASSERT_EQ("v3", Get("foo", s3)); ASSERT_EQ("v4", Get("foo")); db_->ReleaseSnapshot(s3); ASSERT_EQ("v1", Get("foo", s1)); ASSERT_EQ("v2", Get("foo", s2)); ASSERT_EQ("v4", Get("foo")); db_->ReleaseSnapshot(s1); ASSERT_EQ("v2", Get("foo", s2)); ASSERT_EQ("v4", Get("foo")); db_->ReleaseSnapshot(s2); ASSERT_EQ("v4", Get("foo")); } while (ChangeOptions()); } TEST(DBTest, HiddenValuesAreRemoved) { do { Random rnd(301); FillLevels("a", "z"); std::string big = RandomString(&rnd, 50000); Put("foo", big); Put("pastfoo", "v"); const Snapshot* snapshot = db_->GetSnapshot(); Put("foo", "tiny"); Put("pastfoo2", "v2"); // Advance sequence number one more ASSERT_OK(dbfull()->TEST_CompactMemTable()); ASSERT_GT(NumTableFilesAtLevel(0), 0); ASSERT_EQ(big, Get("foo", snapshot)); ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000)); db_->ReleaseSnapshot(snapshot); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]"); Slice x("x"); dbfull()->TEST_CompactRange(0, nullptr, &x); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_GE(NumTableFilesAtLevel(1), 1); dbfull()->TEST_CompactRange(1, nullptr, &x); 
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000)); } while (ChangeOptions()); } TEST(DBTest, DeletionMarkers1) { Put("foo", "v1"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); const int last = config::kMaxMemCompactLevel; ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level // Place a table at level last-1 to prevent merging with preceding mutation Put("a", "begin"); Put("z", "end"); dbfull()->TEST_CompactMemTable(); ASSERT_EQ(NumTableFilesAtLevel(last), 1); ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1); Delete("foo"); Put("foo", "v2"); ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]"); Slice z("z"); dbfull()->TEST_CompactRange(last - 2, nullptr, &z); // DEL eliminated, but v1 remains because we aren't compacting that level // (DEL can be eliminated because v2 hides v1). ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]"); dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]"); } TEST(DBTest, DeletionMarkers2) { Put("foo", "v1"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); const int last = config::kMaxMemCompactLevel; ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level // Place a table at level last-1 to prevent merging with preceding mutation Put("a", "begin"); Put("z", "end"); dbfull()->TEST_CompactMemTable(); ASSERT_EQ(NumTableFilesAtLevel(last), 1); ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1); Delete("foo"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr); // DEL kept: "last" file overlaps ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). ASSERT_EQ(AllEntriesFor("foo"), "[ ]"); } TEST(DBTest, OverlapInLevel0) { do { ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config"; // Fill levels 1 and 2 to disable the pushing of new memtables to levels > // 0. ASSERT_OK(Put("100", "v100")); ASSERT_OK(Put("999", "v999")); dbfull()->TEST_CompactMemTable(); ASSERT_OK(Delete("100")); ASSERT_OK(Delete("999")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("0,1,1", FilesPerLevel()); // Make files spanning the following ranges in level-0: // files[0] 200 .. 900 // files[1] 300 .. 500 // Note that files are sorted by smallest key. ASSERT_OK(Put("300", "v300")); ASSERT_OK(Put("500", "v500")); dbfull()->TEST_CompactMemTable(); ASSERT_OK(Put("200", "v200")); ASSERT_OK(Put("600", "v600")); ASSERT_OK(Put("900", "v900")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("2,1,1", FilesPerLevel()); // Compact away the placeholder files we created initially dbfull()->TEST_CompactRange(1, nullptr, nullptr); dbfull()->TEST_CompactRange(2, nullptr, nullptr); ASSERT_EQ("2", FilesPerLevel()); // Do a memtable compaction. Before bug-fix, the compaction would // not detect the overlap with level-0 files and would incorrectly place // the deletion in a deeper level. 
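// With the fix, the deletion stays in level-0 (hence the "3" below), where
// it still shadows the older "600" entry.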
ASSERT_OK(Delete("600")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("3", FilesPerLevel()); ASSERT_EQ("NOT_FOUND", Get("600")); } while (ChangeOptions()); } TEST(DBTest, L0_CompactionBug_Issue44_a) { Reopen(); ASSERT_OK(Put("b", "v")); Reopen(); ASSERT_OK(Delete("b")); ASSERT_OK(Delete("a")); Reopen(); ASSERT_OK(Delete("a")); Reopen(); ASSERT_OK(Put("a", "v")); Reopen(); Reopen(); ASSERT_EQ("(a->v)", Contents()); DelayMilliseconds(1000); // Wait for compaction to finish ASSERT_EQ("(a->v)", Contents()); } TEST(DBTest, L0_CompactionBug_Issue44_b) { Reopen(); Put("", ""); Reopen(); Delete("e"); Put("", ""); Reopen(); Put("c", "cv"); Reopen(); Put("", ""); Reopen(); Put("", ""); DelayMilliseconds(1000); // Wait for compaction to finish Reopen(); Put("d", "dv"); Reopen(); Put("", ""); Reopen(); Delete("d"); Delete("b"); Reopen(); ASSERT_EQ("(->)(c->cv)", Contents()); DelayMilliseconds(1000); // Wait for compaction to finish ASSERT_EQ("(->)(c->cv)", Contents()); } TEST(DBTest, Fflush_Issue474) { static const int kNum = 100000; Random rnd(test::RandomSeed()); for (int i = 0; i < kNum; i++) { fflush(nullptr); ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100))); } } TEST(DBTest, ComparatorCheck) { class NewComparator : public Comparator { public: const char* Name() const override { return "leveldb.NewComparator"; } int Compare(const Slice& a, const Slice& b) const override { return BytewiseComparator()->Compare(a, b); } void FindShortestSeparator(std::string* s, const Slice& l) const override { BytewiseComparator()->FindShortestSeparator(s, l); } void FindShortSuccessor(std::string* key) const override { BytewiseComparator()->FindShortSuccessor(key); } }; NewComparator cmp; Options new_options = CurrentOptions(); new_options.comparator = &cmp; Status s = TryReopen(&new_options); ASSERT_TRUE(!s.ok()); ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos) << s.ToString(); } TEST(DBTest, CustomComparator) { class NumberComparator : public Comparator { public: const char* Name() const override { return "test.NumberComparator"; } int Compare(const Slice& a, const Slice& b) const override { return ToNumber(a) - ToNumber(b); } void FindShortestSeparator(std::string* s, const Slice& l) const override { ToNumber(*s); // Check format ToNumber(l); // Check format } void FindShortSuccessor(std::string* key) const override { ToNumber(*key); // Check format } private: static int ToNumber(const Slice& x) { // Check that there are no extra characters. 
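// (sscanf must return exactly 1: a stray character after the closing
// bracket would also match the trailing %c and push the return value to 2.)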
      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  NumberComparator cmp;
  Options new_options = CurrentOptions();
  new_options.create_if_missing = true;
  new_options.comparator = &cmp;
  new_options.filter_policy = nullptr;   // Cannot use bloom filters
  new_options.write_buffer_size = 1000;  // Compact more often
  DestroyAndReopen(&new_options);
  ASSERT_OK(Put("[10]", "ten"));
  ASSERT_OK(Put("[0x14]", "twenty"));
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ("ten", Get("[10]"));
    ASSERT_EQ("ten", Get("[0xa]"));
    ASSERT_EQ("twenty", Get("[20]"));
    ASSERT_EQ("twenty", Get("[0x14]"));
    ASSERT_EQ("NOT_FOUND", Get("[15]"));
    ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
    Compact("[0]", "[9999]");
  }

  for (int run = 0; run < 2; run++) {
    for (int i = 0; i < 1000; i++) {
      char buf[100];
      snprintf(buf, sizeof(buf), "[%d]", i * 10);
      ASSERT_OK(Put(buf, buf));
    }
    Compact("[0]", "[1000000]");
  }
}

TEST(DBTest, ManualCompaction) {
  ASSERT_EQ(config::kMaxMemCompactLevel, 2)
      << "Need to update this test to match kMaxMemCompactLevel";

  MakeTables(3, "p", "q");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls before files
  Compact("", "c");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls after files
  Compact("r", "z");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range overlaps files
  Compact("p1", "p9");
  ASSERT_EQ("0,0,1", FilesPerLevel());

  // Populate a different range
  MakeTables(3, "c", "e");
  ASSERT_EQ("1,1,2", FilesPerLevel());

  // Compact just the new range
  Compact("b", "f");
  ASSERT_EQ("0,0,2", FilesPerLevel());

  // Compact all
  MakeTables(1, "a", "z");
  ASSERT_EQ("0,1,2", FilesPerLevel());
  db_->CompactRange(nullptr, nullptr);
  ASSERT_EQ("0,0,1", FilesPerLevel());
}

TEST(DBTest, DBOpen_Options) {
  std::string dbname = test::TmpDir() + "/db_options_test";
  DestroyDB(dbname, Options());

  // Does not exist, and create_if_missing == false: error
  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = false;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does not exist, and create_if_missing == true: OK
  opts.create_if_missing = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;

  // Does exist, and error_if_exists == true: error
  opts.create_if_missing = false;
  opts.error_if_exists = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does exist, and error_if_exists == false: OK
  opts.create_if_missing = true;
  opts.error_if_exists = false;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;
}

TEST(DBTest, DestroyEmptyDir) {
  std::string dbname = test::TmpDir() + "/db_empty_dir";
  TestEnv env(Env::Default());
  env.DeleteDir(dbname);
  ASSERT_TRUE(!env.FileExists(dbname));

  Options opts;
  opts.env = &env;

  ASSERT_OK(env.CreateDir(dbname));
  ASSERT_TRUE(env.FileExists(dbname));
  std::vector<std::string> children;
  ASSERT_OK(env.GetChildren(dbname, &children));
  // The stock Env's do not filter out '.' and '..' special files.
  ASSERT_EQ(2, children.size());
  ASSERT_OK(DestroyDB(dbname, opts));
  ASSERT_TRUE(!env.FileExists(dbname));

  // Should also be destroyed if Env is filtering out dot files.
  env.SetIgnoreDotFiles(true);
  ASSERT_OK(env.CreateDir(dbname));
  ASSERT_TRUE(env.FileExists(dbname));
  ASSERT_OK(env.GetChildren(dbname, &children));
  ASSERT_EQ(0, children.size());
  ASSERT_OK(DestroyDB(dbname, opts));
  ASSERT_TRUE(!env.FileExists(dbname));
}

TEST(DBTest, DestroyOpenDB) {
  std::string dbname = test::TmpDir() + "/open_db_dir";
  env_->DeleteDir(dbname);
  ASSERT_TRUE(!env_->FileExists(dbname));

  Options opts;
  opts.create_if_missing = true;
  DB* db = nullptr;
  ASSERT_OK(DB::Open(opts, dbname, &db));
  ASSERT_TRUE(db != nullptr);

  // Must fail to destroy an open db.
  ASSERT_TRUE(env_->FileExists(dbname));
  ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
  ASSERT_TRUE(env_->FileExists(dbname));

  delete db;
  db = nullptr;

  // Should succeed destroying a closed db.
  ASSERT_OK(DestroyDB(dbname, Options()));
  ASSERT_TRUE(!env_->FileExists(dbname));
}

TEST(DBTest, Locking) {
  DB* db2 = nullptr;
  Status s = DB::Open(CurrentOptions(), dbname_, &db2);
  ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}

// Check that number of files does not grow when we are out of space
TEST(DBTest, NoSpace) {
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);

  ASSERT_OK(Put("foo", "v1"));
  ASSERT_EQ("v1", Get("foo"));
  Compact("a", "z");
  const int num_files = CountFiles();
  // Force out-of-space errors.
  env_->no_space_.store(true, std::memory_order_release);
  for (int i = 0; i < 10; i++) {
    for (int level = 0; level < config::kNumLevels - 1; level++) {
      dbfull()->TEST_CompactRange(level, nullptr, nullptr);
    }
  }
  env_->no_space_.store(false, std::memory_order_release);
  ASSERT_LT(CountFiles(), num_files + 3);
}

TEST(DBTest, NonWritableFileSystem) {
  Options options = CurrentOptions();
  options.write_buffer_size = 1000;
  options.env = env_;
  Reopen(&options);
  ASSERT_OK(Put("foo", "v1"));
  // Force errors for new files.
  env_->non_writable_.store(true, std::memory_order_release);
  std::string big(100000, 'x');
  int errors = 0;
  for (int i = 0; i < 20; i++) {
    fprintf(stderr, "iter %d; errors %d\n", i, errors);
    if (!Put("foo", big).ok()) {
      errors++;
      DelayMilliseconds(100);
    }
  }
  ASSERT_GT(errors, 0);
  env_->non_writable_.store(false, std::memory_order_release);
}

TEST(DBTest, WriteSyncError) {
  // Check that log sync errors cause the DB to disallow future writes.

  // (a) Cause log sync calls to fail
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);
  env_->data_sync_error_.store(true, std::memory_order_release);

  // (b) Normal write should succeed
  WriteOptions w;
  ASSERT_OK(db_->Put(w, "k1", "v1"));
  ASSERT_EQ("v1", Get("k1"));

  // (c) Do a sync write; should fail
  w.sync = true;
  ASSERT_TRUE(!db_->Put(w, "k2", "v2").ok());
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("NOT_FOUND", Get("k2"));

  // (d) make sync behave normally
  env_->data_sync_error_.store(false, std::memory_order_release);

  // (e) Do a non-sync write; should fail
  w.sync = false;
  ASSERT_TRUE(!db_->Put(w, "k3", "v3").ok());
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("NOT_FOUND", Get("k2"));
  ASSERT_EQ("NOT_FOUND", Get("k3"));
}

TEST(DBTest, ManifestWriteError) {
  // Test for the following problem:
  // (a) Compaction produces file F
  // (b) Log record containing F is written to MANIFEST file, but Sync() fails
  // (c) GC deletes F
  // (d) After reopening DB, reads fail since deleted F is named in log record

  // We iterate twice.  In the second iteration, everything is the
  // same except the log record never makes it to the MANIFEST file.
  for (int iter = 0; iter < 2; iter++) {
    std::atomic<bool>* error_type = (iter == 0)
                                        ? &env_->manifest_sync_error_
                                        : &env_->manifest_write_error_;
    // Insert foo=>bar mapping
    Options options = CurrentOptions();
    options.env = env_;
    options.create_if_missing = true;
    options.error_if_exists = false;
    DestroyAndReopen(&options);
    ASSERT_OK(Put("foo", "bar"));
    ASSERT_EQ("bar", Get("foo"));

    // Memtable compaction (will succeed)
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("bar", Get("foo"));
    const int last = config::kMaxMemCompactLevel;
    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level

    // Merging compaction (will fail)
    error_type->store(true, std::memory_order_release);
    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
    ASSERT_EQ("bar", Get("foo"));

    // Recovery: should not lose data
    error_type->store(false, std::memory_order_release);
    Reopen(&options);
    ASSERT_EQ("bar", Get("foo"));
  }
}

TEST(DBTest, MissingSSTFile) {
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_EQ("bar", Get("foo"));

  // Dump the memtable to disk.
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ("bar", Get("foo"));

  Close();
  ASSERT_TRUE(DeleteAnSSTFile());
  Options options = CurrentOptions();
  options.paranoid_checks = true;
  Status s = TryReopen(&options);
  ASSERT_TRUE(!s.ok());
  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
}

TEST(DBTest, StillReadSST) {
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_EQ("bar", Get("foo"));

  // Dump the memtable to disk.
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ("bar", Get("foo"));
  Close();
  ASSERT_GT(RenameLDBToSST(), 0);
  Options options = CurrentOptions();
  options.paranoid_checks = true;
  Status s = TryReopen(&options);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ("bar", Get("foo"));
}

TEST(DBTest, FilesDeletedAfterCompaction) {
  ASSERT_OK(Put("foo", "v2"));
  Compact("a", "z");
  const int num_files = CountFiles();
  for (int i = 0; i < 10; i++) {
    ASSERT_OK(Put("foo", "v2"));
    Compact("a", "z");
  }
  ASSERT_EQ(CountFiles(), num_files);
}

TEST(DBTest, BloomFilter) {
  env_->count_random_reads_ = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.block_cache = NewLRUCache(0);  // Prevent cache hits
  options.filter_policy = NewBloomFilterPolicy(10);
  Reopen(&options);

  // Populate multiple layers
  const int N = 10000;
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  Compact("a", "z");
  for (int i = 0; i < N; i += 100) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  dbfull()->TEST_CompactMemTable();

  // Prevent auto compactions triggered by seeks
  env_->delay_data_sync_.store(true, std::memory_order_release);

  // Lookup present keys.  Should rarely read from small sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i), Get(Key(i)));
  }
  int reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d present => %d reads\n", N, reads);
  ASSERT_GE(reads, N);
  ASSERT_LE(reads, N + 2 * N / 100);

  // Lookup missing keys.  Should rarely read from either sstable.
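  // Note (editorial, not in the original source): with roughly 10 bits per
  // key (NewBloomFilterPolicy(10) above), a bloom filter's expected
  // false-positive rate is on the order of 1%, so the 3% bound checked
  // below leaves slack for both sstables being consulted per lookup.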
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
  }
  reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d missing => %d reads\n", N, reads);
  ASSERT_LE(reads, 3 * N / 100);

  env_->delay_data_sync_.store(false, std::memory_order_release);
  Close();
  delete options.block_cache;
  delete options.filter_policy;
}

// Multi-threaded test:
namespace {

static const int kNumThreads = 4;
static const int kTestSeconds = 10;
static const int kNumKeys = 1000;

struct MTState {
  DBTest* test;
  std::atomic<bool> stop;
  std::atomic<int> counter[kNumThreads];
  std::atomic<bool> thread_done[kNumThreads];
};

struct MTThread {
  MTState* state;
  int id;
};

static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  int counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  std::string value;
  char valbuf[1500];
  while (!t->state->stop.load(std::memory_order_acquire)) {
    t->state->counter[id].store(counter, std::memory_order_release);

    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);

    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter>.
      // We add some padding to force compactions.
      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
               static_cast<int>(counter));
      ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
    } else {
      // Read a value and verify that it matches the pattern written above.
      Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int k, w, c;
        ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
        ASSERT_EQ(k, key);
        ASSERT_GE(w, 0);
        ASSERT_LT(w, kNumThreads);
        ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
      }
    }
    counter++;
  }
  t->state->thread_done[id].store(true, std::memory_order_release);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
}
}  // namespace

TEST(DBTest, MultiThreaded) {
  do {
    // Initialize state
    MTState mt;
    mt.test = this;
    mt.stop.store(false, std::memory_order_release);
    for (int id = 0; id < kNumThreads; id++) {
      mt.counter[id].store(0, std::memory_order_release);
      mt.thread_done[id].store(false, std::memory_order_release);
    }

    // Start threads
    MTThread thread[kNumThreads];
    for (int id = 0; id < kNumThreads; id++) {
      thread[id].state = &mt;
      thread[id].id = id;
      env_->StartThread(MTThreadBody, &thread[id]);
    }

    // Let them run for a while
    DelayMilliseconds(kTestSeconds * 1000);

    // Stop the threads and wait for them to finish
    mt.stop.store(true, std::memory_order_release);
    for (int id = 0; id < kNumThreads; id++) {
      while (!mt.thread_done[id].load(std::memory_order_acquire)) {
        DelayMilliseconds(100);
      }
    }
  } while (ChangeOptions());
}

namespace {
typedef std::map<std::string, std::string> KVMap;
}

class ModelDB : public DB {
 public:
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
  };

  explicit ModelDB(const Options& options) : options_(options) {}
  ~ModelDB() override = default;
  Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
    return DB::Put(o, k, v);
  }
  Status Delete(const WriteOptions& o, const Slice& key) override {
    return DB::Delete(o, key);
  }
  Status Get(const ReadOptions& options, const Slice& key,
             std::string* value) override {
    assert(false);  // Not implemented
    return Status::NotFound(key);
  }
  Iterator* NewIterator(const ReadOptions& options) override {
    if (options.snapshot == nullptr) {
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
  }
  const Snapshot* GetSnapshot() override {
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
  }

  void ReleaseSnapshot(const Snapshot* snapshot) override {
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
  Status Write(const WriteOptions& options, WriteBatch* batch) override {
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
      void Put(const Slice& key, const Slice& value) override {
        (*map_)[key.ToString()] = value.ToString();
      }
      void Delete(const Slice& key) override { map_->erase(key.ToString()); }
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
  }

  bool GetProperty(const Slice& property, std::string* value) override {
    return false;
  }
  void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override {
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;
    }
  }
  void CompactRange(const Slice* start, const Slice* end) override {}

 private:
  class ModelIter : public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {}
    ~ModelIter() override {
      if (owned_) delete map_;
    }
    bool Valid() const override { return iter_ != map_->end(); }
    void SeekToFirst() override { iter_ = map_->begin(); }
    void SeekToLast() override {
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
    void Seek(const Slice& k) override {
      iter_ = map_->lower_bound(k.ToString());
    }
    void Next() override { ++iter_; }
    void Prev() override { --iter_; }
    Slice key() const override { return iter_->first; }
    Slice value() const override { return iter_->second; }
    Status status() const override { return Status::OK(); }

   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };

  const Options options_;
  KVMap map_;
};

static bool CompareIterators(int step, DB* model, DB* db,
                             const Snapshot* model_snap,
                             const Snapshot* db_snap) {
  ReadOptions options;
  options.snapshot = model_snap;
  Iterator* miter = model->NewIterator(options);
  options.snapshot = db_snap;
  Iterator* dbiter = db->NewIterator(options);
  bool ok = true;
  int count = 0;
  for (miter->SeekToFirst(), dbiter->SeekToFirst();
       ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
    count++;
    if (miter->key().compare(dbiter->key()) != 0) {
      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
              EscapeString(miter->key()).c_str(),
              EscapeString(dbiter->key()).c_str());
      ok = false;
      break;
    }

    if (miter->value().compare(dbiter->value()) != 0) {
      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
              step, EscapeString(miter->key()).c_str(),
              EscapeString(miter->value()).c_str(),
              EscapeString(dbiter->value()).c_str());
      ok = false;
    }
  }

  if (ok) {
    if (miter->Valid() != dbiter->Valid()) {
      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
              step, miter->Valid(), dbiter->Valid());
      ok = false;
    }
  }
  fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
  delete miter;
  delete dbiter;
  return ok;
}

TEST(DBTest, Randomized) {
  Random rnd(test::RandomSeed());
  do {
    ModelDB model(CurrentOptions());
    const int N = 10000;
    const Snapshot* model_snap = nullptr;
    const Snapshot* db_snap = nullptr;
    std::string k, v;
    for (int step = 0; step < N; step++) {
      if (step % 100 == 0) {
        fprintf(stderr, "Step %d of %d\n", step, N);
      }
      // TODO(sanjay): Test Get() works
      int p = rnd.Uniform(100);
      if (p < 45) {  // Put
        k = RandomKey(&rnd);
        v = RandomString(
            &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
        ASSERT_OK(model.Put(WriteOptions(), k, v));
        ASSERT_OK(db_->Put(WriteOptions(), k, v));
      } else if (p < 90) {  // Delete
        k = RandomKey(&rnd);
        ASSERT_OK(model.Delete(WriteOptions(), k));
        ASSERT_OK(db_->Delete(WriteOptions(), k));
      } else {  // Multi-element batch
        WriteBatch b;
        const int num = rnd.Uniform(8);
        for (int i = 0; i < num; i++) {
          if (i == 0 || !rnd.OneIn(10)) {
            k = RandomKey(&rnd);
          } else {
            // Periodically re-use the same key from the previous iter, so
            // we have multiple entries in the write batch for the same key
          }
          if (rnd.OneIn(2)) {
            v = RandomString(&rnd, rnd.Uniform(10));
            b.Put(k, v);
          } else {
            b.Delete(k);
          }
        }
        ASSERT_OK(model.Write(WriteOptions(), &b));
        ASSERT_OK(db_->Write(WriteOptions(), &b));
      }

      if ((step % 100) == 0) {
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
        // Save a snapshot from each DB this time that we'll use next
        // time we compare things, to make sure the current state is
        // preserved with the snapshot
        if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
        if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);

        Reopen();
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

        model_snap = model.GetSnapshot();
        db_snap = db_->GetSnapshot();
      }
    }
    if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
    if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
  } while (ChangeOptions());
}

std::string MakeKey(unsigned int num) {
  char buf[30];
  snprintf(buf, sizeof(buf), "%016u", num);
  return std::string(buf);
}

void BM_LogAndApply(int iters, int num_base_files) {
  std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
  DestroyDB(dbname, Options());

  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = true;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);

  delete db;
  db = nullptr;

  Env* env = Env::Default();

  port::Mutex mu;
  MutexLock l(&mu);

  InternalKeyComparator cmp(BytewiseComparator());
  Options options;
  VersionSet vset(dbname, &options, nullptr, &cmp);
  bool save_manifest;
  ASSERT_OK(vset.Recover(&save_manifest));
  VersionEdit vbase;
  uint64_t fnum = 1;
  for (int i = 0; i < num_base_files; i++) {
    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
    vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
  }
  ASSERT_OK(vset.LogAndApply(&vbase, &mu));

  uint64_t start_micros = env->NowMicros();

  for (int i = 0; i < iters; i++) {
    VersionEdit vedit;
    vedit.DeleteFile(2, fnum);
    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
    vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
    vset.LogAndApply(&vedit, &mu);
  }
  uint64_t stop_micros = env->NowMicros();
  unsigned int us = stop_micros - start_micros;
  char buf[16];
  snprintf(buf, sizeof(buf), "%d", num_base_files);
  fprintf(stderr,
          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n", buf,
          iters, us, ((float)us) / iters);
}

}  // namespace leveldb

int main(int argc, char** argv) {
  if (argc > 1 && std::string(argv[1]) == "--benchmark") {
    leveldb::BM_LogAndApply(1000, 1);
    leveldb::BM_LogAndApply(1000, 100);
    leveldb::BM_LogAndApply(1000, 10000);
    leveldb::BM_LogAndApply(100, 100000);
    return 0;
  }

  return leveldb::test::RunAllTests();
}
bitcoin/src/leveldb
bitcoin/src/leveldb/db/builder.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_DB_BUILDER_H_
#define STORAGE_LEVELDB_DB_BUILDER_H_

#include "leveldb/status.h"

namespace leveldb {

struct Options;
struct FileMetaData;

class Env;
class Iterator;
class TableCache;
class VersionEdit;

// Build a Table file from the contents of *iter.  The generated file
// will be named according to meta->number.  On success, the rest of
// *meta will be filled with metadata about the generated table.
// If no data is present in *iter, meta->file_size will be set to
// zero, and no Table file will be produced.
Status BuildTable(const std::string& dbname, Env* env, const Options& options,
                  TableCache* table_cache, Iterator* iter, FileMetaData* meta);

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_BUILDER_H_
bitcoin/src/leveldb
bitcoin/src/leveldb/db/version_set.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_set.h"

#include <stdio.h>

#include <algorithm>

#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "leveldb/env.h"
#include "leveldb/table_builder.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"

namespace leveldb {

static size_t TargetFileSize(const Options* options) {
  return options->max_file_size;
}

// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
// stop building a single file in a level->level+1 compaction.
static int64_t MaxGrandParentOverlapBytes(const Options* options) {
  return 10 * TargetFileSize(options);
}

// Maximum number of bytes in all compacted files.  We avoid expanding
// the lower level file set of a compaction if it would make the
// total compaction cover more than this many bytes.
static int64_t ExpandedCompactionByteSizeLimit(const Options* options) {
  return 25 * TargetFileSize(options);
}

static double MaxBytesForLevel(const Options* options, int level) {
  // Note: the result for level zero is not really used since we set
  // the level-0 compaction threshold based on number of files.

  // Result for both level-0 and level-1
  double result = 10. * 1048576.0;
  while (level > 1) {
    result *= 10;
    level--;
  }
  return result;
}

static uint64_t MaxFileSizeForLevel(const Options* options, int level) {
  // We could vary per level to reduce number of files?
  return TargetFileSize(options);
}

static int64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
  int64_t sum = 0;
  for (size_t i = 0; i < files.size(); i++) {
    sum += files[i]->file_size;
  }
  return sum;
}

Version::~Version() {
  assert(refs_ == 0);

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
  for (int level = 0; level < config::kNumLevels; level++) {
    for (size_t i = 0; i < files_[level].size(); i++) {
      FileMetaData* f = files_[level][i];
      assert(f->refs > 0);
      f->refs--;
      if (f->refs <= 0) {
        delete f;
      }
    }
  }
}

int FindFile(const InternalKeyComparator& icmp,
             const std::vector<FileMetaData*>& files, const Slice& key) {
  uint32_t left = 0;
  uint32_t right = files.size();
  while (left < right) {
    uint32_t mid = (left + right) / 2;
    const FileMetaData* f = files[mid];
    if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
      // Key at "mid.largest" is < "target".  Therefore all
      // files at or before "mid" are uninteresting.
      left = mid + 1;
    } else {
      // Key at "mid.largest" is >= "target".  Therefore all files
      // after "mid" are uninteresting.
      right = mid;
    }
  }
  return right;
}

static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
                      const FileMetaData* f) {
  // null user_key occurs before all keys and is therefore never after *f
  return (user_key != nullptr &&
          ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}

static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
                       const FileMetaData* f) {
  // null user_key occurs after all keys and is therefore never before *f
  return (user_key != nullptr &&
          ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}

bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
                           bool disjoint_sorted_files,
                           const std::vector<FileMetaData*>& files,
                           const Slice* smallest_user_key,
                           const Slice* largest_user_key) {
  const Comparator* ucmp = icmp.user_comparator();
  if (!disjoint_sorted_files) {
    // Need to check against all files
    for (size_t i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      if (AfterFile(ucmp, smallest_user_key, f) ||
          BeforeFile(ucmp, largest_user_key, f)) {
        // No overlap
      } else {
        return true;  // Overlap
      }
    }
    return false;
  }

  // Binary search over file list
  uint32_t index = 0;
  if (smallest_user_key != nullptr) {
    // Find the earliest possible internal key for smallest_user_key
    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
                          kValueTypeForSeek);
    index = FindFile(icmp, files, small_key.Encode());
  }

  if (index >= files.size()) {
    // beginning of range is after all files, so no overlap.
    return false;
  }

  return !BeforeFile(ucmp, largest_user_key, files[index]);
}

// An internal iterator.  For a given version/level pair, yields
// information about the files in the level.  For a given entry, key()
// is the largest key that occurs in the file, and value() is a
// 16-byte value containing the file number and file size, both
// encoded using EncodeFixed64.
class Version::LevelFileNumIterator : public Iterator {
 public:
  LevelFileNumIterator(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>* flist)
      : icmp_(icmp), flist_(flist), index_(flist->size()) {  // Marks as invalid
  }
  bool Valid() const override { return index_ < flist_->size(); }
  void Seek(const Slice& target) override {
    index_ = FindFile(icmp_, *flist_, target);
  }
  void SeekToFirst() override { index_ = 0; }
  void SeekToLast() override {
    index_ = flist_->empty() ? 0 : flist_->size() - 1;
  }
  void Next() override {
    assert(Valid());
    index_++;
  }
  void Prev() override {
    assert(Valid());
    if (index_ == 0) {
      index_ = flist_->size();  // Marks as invalid
    } else {
      index_--;
    }
  }
  Slice key() const override {
    assert(Valid());
    return (*flist_)[index_]->largest.Encode();
  }
  Slice value() const override {
    assert(Valid());
    EncodeFixed64(value_buf_, (*flist_)[index_]->number);
    EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
    return Slice(value_buf_, sizeof(value_buf_));
  }
  Status status() const override { return Status::OK(); }

 private:
  const InternalKeyComparator icmp_;
  const std::vector<FileMetaData*>* const flist_;
  uint32_t index_;

  // Backing store for value().  Holds the file number and size.
  mutable char value_buf_[16];
};

static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
                                 const Slice& file_value) {
  TableCache* cache = reinterpret_cast<TableCache*>(arg);
  if (file_value.size() != 16) {
    return NewErrorIterator(
        Status::Corruption("FileReader invoked with unexpected value"));
  } else {
    return cache->NewIterator(options, DecodeFixed64(file_value.data()),
                              DecodeFixed64(file_value.data() + 8));
  }
}

Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                            int level) const {
  return NewTwoLevelIterator(
      new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
      vset_->table_cache_, options);
}

void Version::AddIterators(const ReadOptions& options,
                           std::vector<Iterator*>* iters) {
  // Merge all level zero files together since they may overlap
  for (size_t i = 0; i < files_[0].size(); i++) {
    iters->push_back(vset_->table_cache_->NewIterator(
        options, files_[0][i]->number, files_[0][i]->file_size));
  }

  // For levels > 0, we can use a concatenating iterator that sequentially
  // walks through the non-overlapping files in the level, opening them
  // lazily.
  for (int level = 1; level < config::kNumLevels; level++) {
    if (!files_[level].empty()) {
      iters->push_back(NewConcatenatingIterator(options, level));
    }
  }
}

// Callback from TableCache::Get()
namespace {
enum SaverState {
  kNotFound,
  kFound,
  kDeleted,
  kCorrupt,
};
struct Saver {
  SaverState state;
  const Comparator* ucmp;
  Slice user_key;
  std::string* value;
};
}  // namespace
static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
  Saver* s = reinterpret_cast<Saver*>(arg);
  ParsedInternalKey parsed_key;
  if (!ParseInternalKey(ikey, &parsed_key)) {
    s->state = kCorrupt;
  } else {
    if (s->ucmp->Compare(parsed_key.user_key, s->user_key) == 0) {
      s->state = (parsed_key.type == kTypeValue) ? kFound : kDeleted;
      if (s->state == kFound) {
        s->value->assign(v.data(), v.size());
      }
    }
  }
}

static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
  return a->number > b->number;
}

void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                                 bool (*func)(void*, int, FileMetaData*)) {
  const Comparator* ucmp = vset_->icmp_.user_comparator();

  // Search level-0 in order from newest to oldest.
  std::vector<FileMetaData*> tmp;
  tmp.reserve(files_[0].size());
  for (uint32_t i = 0; i < files_[0].size(); i++) {
    FileMetaData* f = files_[0][i];
    if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
        ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
      tmp.push_back(f);
    }
  }
  if (!tmp.empty()) {
    std::sort(tmp.begin(), tmp.end(), NewestFirst);
    for (uint32_t i = 0; i < tmp.size(); i++) {
      if (!(*func)(arg, 0, tmp[i])) {
        return;
      }
    }
  }

  // Search other levels.
  for (int level = 1; level < config::kNumLevels; level++) {
    size_t num_files = files_[level].size();
    if (num_files == 0) continue;

    // Binary search to find earliest index whose largest key >= internal_key.
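    // (Editorial note: FindFile returns files_[level].size() when every
    // file's largest key is smaller than internal_key; the index < num_files
    // check below treats that as "no candidate file in this level".)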
    uint32_t index = FindFile(vset_->icmp_, files_[level], internal_key);
    if (index < num_files) {
      FileMetaData* f = files_[level][index];
      if (ucmp->Compare(user_key, f->smallest.user_key()) < 0) {
        // All of "f" is past any data for user_key
      } else {
        if (!(*func)(arg, level, f)) {
          return;
        }
      }
    }
  }
}

Status Version::Get(const ReadOptions& options, const LookupKey& k,
                    std::string* value, GetStats* stats) {
  stats->seek_file = nullptr;
  stats->seek_file_level = -1;

  struct State {
    Saver saver;
    GetStats* stats;
    const ReadOptions* options;
    Slice ikey;
    FileMetaData* last_file_read;
    int last_file_read_level;

    VersionSet* vset;
    Status s;
    bool found;

    static bool Match(void* arg, int level, FileMetaData* f) {
      State* state = reinterpret_cast<State*>(arg);

      if (state->stats->seek_file == nullptr &&
          state->last_file_read != nullptr) {
        // We have had more than one seek for this read.  Charge the 1st file.
        state->stats->seek_file = state->last_file_read;
        state->stats->seek_file_level = state->last_file_read_level;
      }

      state->last_file_read = f;
      state->last_file_read_level = level;

      state->s = state->vset->table_cache_->Get(*state->options, f->number,
                                                f->file_size, state->ikey,
                                                &state->saver, SaveValue);
      if (!state->s.ok()) {
        state->found = true;
        return false;
      }
      switch (state->saver.state) {
        case kNotFound:
          return true;  // Keep searching in other files
        case kFound:
          state->found = true;
          return false;
        case kDeleted:
          return false;
        case kCorrupt:
          state->s =
              Status::Corruption("corrupted key for ", state->saver.user_key);
          state->found = true;
          return false;
      }

      // Not reached. Added to avoid false compilation warnings of
      // "control reaches end of non-void function".
      return false;
    }
  };

  State state;
  state.found = false;
  state.stats = stats;
  state.last_file_read = nullptr;
  state.last_file_read_level = -1;

  state.options = &options;
  state.ikey = k.internal_key();
  state.vset = vset_;

  state.saver.state = kNotFound;
  state.saver.ucmp = vset_->icmp_.user_comparator();
  state.saver.user_key = k.user_key();
  state.saver.value = value;

  ForEachOverlapping(state.saver.user_key, state.ikey, &state, &State::Match);

  return state.found ? state.s : Status::NotFound(Slice());
}

bool Version::UpdateStats(const GetStats& stats) {
  FileMetaData* f = stats.seek_file;
  if (f != nullptr) {
    f->allowed_seeks--;
    if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) {
      file_to_compact_ = f;
      file_to_compact_level_ = stats.seek_file_level;
      return true;
    }
  }
  return false;
}

bool Version::RecordReadSample(Slice internal_key) {
  ParsedInternalKey ikey;
  if (!ParseInternalKey(internal_key, &ikey)) {
    return false;
  }

  struct State {
    GetStats stats;  // Holds first matching file
    int matches;

    static bool Match(void* arg, int level, FileMetaData* f) {
      State* state = reinterpret_cast<State*>(arg);
      state->matches++;
      if (state->matches == 1) {
        // Remember first match.
        state->stats.seek_file = f;
        state->stats.seek_file_level = level;
      }
      // We can stop iterating once we have a second match.
      return state->matches < 2;
    }
  };

  State state;
  state.matches = 0;
  ForEachOverlapping(ikey.user_key, internal_key, &state, &State::Match);

  // Must have at least two matches since we want to merge across
  // files. But what if we have a single file that contains many
  // overwrites and deletions?  Should we have another mechanism for
  // finding such files?
  if (state.matches >= 2) {
    // 1MB cost is about 1 seek (see comment in Builder::Apply).
    return UpdateStats(state.stats);
  }
  return false;
}

void Version::Ref() { ++refs_; }

void Version::Unref() {
  assert(this != &vset_->dummy_versions_);
  assert(refs_ >= 1);
  --refs_;
  if (refs_ == 0) {
    delete this;
  }
}

bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
                             const Slice* largest_user_key) {
  return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                               smallest_user_key, largest_user_key);
}

int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
                                        const Slice& largest_user_key) {
  int level = 0;
  if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
    // Push to next level if there is no overlap in next level,
    // and the #bytes overlapping in the level after that are limited.
    InternalKey start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey limit(largest_user_key, 0, static_cast<ValueType>(0));
    std::vector<FileMetaData*> overlaps;
    while (level < config::kMaxMemCompactLevel) {
      if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
        break;
      }
      if (level + 2 < config::kNumLevels) {
        // Check that file does not overlap too many grandparent bytes.
        GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
        const int64_t sum = TotalFileSize(overlaps);
        if (sum > MaxGrandParentOverlapBytes(vset_->options_)) {
          break;
        }
      }
      level++;
    }
  }
  return level;
}

// Store in "*inputs" all files in "level" that overlap [begin,end]
void Version::GetOverlappingInputs(int level, const InternalKey* begin,
                                   const InternalKey* end,
                                   std::vector<FileMetaData*>* inputs) {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  inputs->clear();
  Slice user_begin, user_end;
  if (begin != nullptr) {
    user_begin = begin->user_key();
  }
  if (end != nullptr) {
    user_end = end->user_key();
  }
  const Comparator* user_cmp = vset_->icmp_.user_comparator();
  for (size_t i = 0; i < files_[level].size();) {
    FileMetaData* f = files_[level][i++];
    const Slice file_start = f->smallest.user_key();
    const Slice file_limit = f->largest.user_key();
    if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
      // "f" is completely before specified range; skip it
    } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
      // "f" is completely after specified range; skip it
    } else {
      inputs->push_back(f);
      if (level == 0) {
        // Level-0 files may overlap each other.  So check if the newly
        // added file has expanded the range.  If so, restart search.
        if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
          user_begin = file_start;
          inputs->clear();
          i = 0;
        } else if (end != nullptr &&
                   user_cmp->Compare(file_limit, user_end) > 0) {
          user_end = file_limit;
          inputs->clear();
          i = 0;
        }
      }
    }
  }
}

std::string Version::DebugString() const {
  std::string r;
  for (int level = 0; level < config::kNumLevels; level++) {
    // E.g.,
    //   --- level 1 ---
    //   17:123['a' .. 'd']
    //   20:43['e' .. 'g']
    r.append("--- level ");
    AppendNumberTo(&r, level);
    r.append(" ---\n");
    const std::vector<FileMetaData*>& files = files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      r.push_back(' ');
      AppendNumberTo(&r, files[i]->number);
      r.push_back(':');
      AppendNumberTo(&r, files[i]->file_size);
      r.append("[");
      r.append(files[i]->smallest.DebugString());
      r.append(" .. ");
      r.append(files[i]->largest.DebugString());
      r.append("]\n");
    }
  }
  return r;
}

// A helper class so we can efficiently apply a whole sequence
// of edits to a particular state without creating intermediate
// Versions that contain full copies of the intermediate state.
class VersionSet::Builder {
 private:
  // Helper to sort by v->files_[file_number].smallest
  struct BySmallestKey {
    const InternalKeyComparator* internal_comparator;

    bool operator()(FileMetaData* f1, FileMetaData* f2) const {
      int r = internal_comparator->Compare(f1->smallest, f2->smallest);
      if (r != 0) {
        return (r < 0);
      } else {
        // Break ties by file number
        return (f1->number < f2->number);
      }
    }
  };

  typedef std::set<FileMetaData*, BySmallestKey> FileSet;
  struct LevelState {
    std::set<uint64_t> deleted_files;
    FileSet* added_files;
  };

  VersionSet* vset_;
  Version* base_;
  LevelState levels_[config::kNumLevels];

 public:
  // Initialize a builder with the files from *base and other info from *vset
  Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
    base_->Ref();
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      levels_[level].added_files = new FileSet(cmp);
    }
  }

  ~Builder() {
    for (int level = 0; level < config::kNumLevels; level++) {
      const FileSet* added = levels_[level].added_files;
      std::vector<FileMetaData*> to_unref;
      to_unref.reserve(added->size());
      for (FileSet::const_iterator it = added->begin(); it != added->end();
           ++it) {
        to_unref.push_back(*it);
      }
      delete added;
      for (uint32_t i = 0; i < to_unref.size(); i++) {
        FileMetaData* f = to_unref[i];
        f->refs--;
        if (f->refs <= 0) {
          delete f;
        }
      }
    }
    base_->Unref();
  }

  // Apply all of the edits in *edit to the current state.
  void Apply(VersionEdit* edit) {
    // Update compaction pointers
    for (size_t i = 0; i < edit->compact_pointers_.size(); i++) {
      const int level = edit->compact_pointers_[i].first;
      vset_->compact_pointer_[level] =
          edit->compact_pointers_[i].second.Encode().ToString();
    }

    // Delete files
    for (const auto& deleted_file_set_kvp : edit->deleted_files_) {
      const int level = deleted_file_set_kvp.first;
      const uint64_t number = deleted_file_set_kvp.second;
      levels_[level].deleted_files.insert(number);
    }

    // Add new files
    for (size_t i = 0; i < edit->new_files_.size(); i++) {
      const int level = edit->new_files_[i].first;
      FileMetaData* f = new FileMetaData(edit->new_files_[i].second);
      f->refs = 1;

      // We arrange to automatically compact this file after
      // a certain number of seeks.  Let's assume:
      //   (1) One seek costs 10ms
      //   (2) Writing or reading 1MB costs 10ms (100MB/s)
      //   (3) A compaction of 1MB does 25MB of IO:
      //         1MB read from this level
      //         10-12MB read from next level (boundaries may be misaligned)
      //         10-12MB written to next level
      // This implies that 25 seeks cost the same as the compaction
      // of 1MB of data.  I.e., one seek costs approximately the
      // same as the compaction of 40KB of data.  We are a little
      // conservative and allow approximately one seek for every 16KB
      // of data before triggering a compaction.
      f->allowed_seeks = static_cast<int>((f->file_size / 16384U));
      if (f->allowed_seeks < 100) f->allowed_seeks = 100;

      levels_[level].deleted_files.erase(f->number);
      levels_[level].added_files->insert(f);
    }
  }

  // Save the current state in *v.
  void SaveTo(Version* v) {
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      // Merge the set of added files with the set of pre-existing files.
      // Drop any deleted files.  Store the result in *v.
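      // (Editorial note: both inputs are sorted by smallest key --
      // base_->files_[level] is a sorted vector and added_files is a std::set
      // ordered by BySmallestKey -- so the loop below is the merge step of a
      // merge sort, emitting every base file that sorts before each added
      // file first.)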
      const std::vector<FileMetaData*>& base_files = base_->files_[level];
      std::vector<FileMetaData*>::const_iterator base_iter =
          base_files.begin();
      std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
      const FileSet* added_files = levels_[level].added_files;
      v->files_[level].reserve(base_files.size() + added_files->size());
      for (const auto& added_file : *added_files) {
        // Add all smaller files listed in base_
        for (std::vector<FileMetaData*>::const_iterator bpos =
                 std::upper_bound(base_iter, base_end, added_file, cmp);
             base_iter != bpos; ++base_iter) {
          MaybeAddFile(v, level, *base_iter);
        }

        MaybeAddFile(v, level, added_file);
      }

      // Add remaining base files
      for (; base_iter != base_end; ++base_iter) {
        MaybeAddFile(v, level, *base_iter);
      }

#ifndef NDEBUG
      // Make sure there is no overlap in levels > 0
      if (level > 0) {
        for (uint32_t i = 1; i < v->files_[level].size(); i++) {
          const InternalKey& prev_end = v->files_[level][i - 1]->largest;
          const InternalKey& this_begin = v->files_[level][i]->smallest;
          if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
            fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
                    prev_end.DebugString().c_str(),
                    this_begin.DebugString().c_str());
            abort();
          }
        }
      }
#endif
    }
  }

  void MaybeAddFile(Version* v, int level, FileMetaData* f) {
    if (levels_[level].deleted_files.count(f->number) > 0) {
      // File is deleted: do nothing
    } else {
      std::vector<FileMetaData*>* files = &v->files_[level];
      if (level > 0 && !files->empty()) {
        // Must not overlap
        assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
                                    f->smallest) < 0);
      }
      f->refs++;
      files->push_back(f);
    }
  }
};

VersionSet::VersionSet(const std::string& dbname, const Options* options,
                       TableCache* table_cache,
                       const InternalKeyComparator* cmp)
    : env_(options->env),
      dbname_(dbname),
      options_(options),
      table_cache_(table_cache),
      icmp_(*cmp),
      next_file_number_(2),
      manifest_file_number_(0),  // Filled by Recover()
      last_sequence_(0),
      log_number_(0),
      prev_log_number_(0),
      descriptor_file_(nullptr),
      descriptor_log_(nullptr),
      dummy_versions_(this),
      current_(nullptr) {
  AppendVersion(new Version(this));
}

VersionSet::~VersionSet() {
  current_->Unref();
  assert(dummy_versions_.next_ == &dummy_versions_);  // List must be empty
  delete descriptor_log_;
  delete descriptor_file_;
}

void VersionSet::AppendVersion(Version* v) {
  // Make "v" current
  assert(v->refs_ == 0);
  assert(v != current_);
  if (current_ != nullptr) {
    current_->Unref();
  }
  current_ = v;
  v->Ref();

  // Append to linked list
  v->prev_ = dummy_versions_.prev_;
  v->next_ = &dummy_versions_;
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}

Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
  if (edit->has_log_number_) {
    assert(edit->log_number_ >= log_number_);
    assert(edit->log_number_ < next_file_number_);
  } else {
    edit->SetLogNumber(log_number_);
  }

  if (!edit->has_prev_log_number_) {
    edit->SetPrevLogNumber(prev_log_number_);
  }

  edit->SetNextFile(next_file_number_);
  edit->SetLastSequence(last_sequence_);

  Version* v = new Version(this);
  {
    Builder builder(this, current_);
    builder.Apply(edit);
    builder.SaveTo(v);
  }
  Finalize(v);

  // Initialize new descriptor log file if necessary by creating
  // a temporary file that contains a snapshot of the current version.
  std::string new_manifest_file;
  Status s;
  if (descriptor_log_ == nullptr) {
    // No reason to unlock *mu here since we only hit this path in the
    // first call to LogAndApply (when opening the database).
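    // (Editorial note: a brand-new MANIFEST starts with a WriteSnapshot
    // record so it fully describes the current version on its own;
    // subsequent edits are then appended as incremental records.)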
    assert(descriptor_file_ == nullptr);
    new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
    edit->SetNextFile(next_file_number_);
    s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
    if (s.ok()) {
      descriptor_log_ = new log::Writer(descriptor_file_);
      s = WriteSnapshot(descriptor_log_);
    }
  }

  // Unlock during expensive MANIFEST log write
  {
    mu->Unlock();

    // Write new record to MANIFEST log
    if (s.ok()) {
      std::string record;
      edit->EncodeTo(&record);
      s = descriptor_log_->AddRecord(record);
      if (s.ok()) {
        s = descriptor_file_->Sync();
      }
      if (!s.ok()) {
        Log(options_->info_log, "MANIFEST write: %s\n", s.ToString().c_str());
      }
    }

    // If we just created a new descriptor file, install it by writing a
    // new CURRENT file that points to it.
    if (s.ok() && !new_manifest_file.empty()) {
      s = SetCurrentFile(env_, dbname_, manifest_file_number_);
    }

    mu->Lock();
  }

  // Install the new version
  if (s.ok()) {
    AppendVersion(v);
    log_number_ = edit->log_number_;
    prev_log_number_ = edit->prev_log_number_;
  } else {
    delete v;
    if (!new_manifest_file.empty()) {
      delete descriptor_log_;
      delete descriptor_file_;
      descriptor_log_ = nullptr;
      descriptor_file_ = nullptr;
      env_->DeleteFile(new_manifest_file);
    }
  }

  return s;
}

Status VersionSet::Recover(bool* save_manifest) {
  struct LogReporter : public log::Reader::Reporter {
    Status* status;
    void Corruption(size_t bytes, const Status& s) override {
      if (this->status->ok()) *this->status = s;
    }
  };

  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string current;
  Status s = ReadFileToString(env_, CurrentFileName(dbname_), &current);
  if (!s.ok()) {
    return s;
  }
  if (current.empty() || current[current.size() - 1] != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  current.resize(current.size() - 1);

  std::string dscname = dbname_ + "/" + current;
  SequentialFile* file;
  s = env_->NewSequentialFile(dscname, &file);
  if (!s.ok()) {
    if (s.IsNotFound()) {
      return Status::Corruption("CURRENT points to a non-existent file",
                                s.ToString());
    }
    return s;
  }

  bool have_log_number = false;
  bool have_prev_log_number = false;
  bool have_next_file = false;
  bool have_last_sequence = false;
  uint64_t next_file = 0;
  uint64_t last_sequence = 0;
  uint64_t log_number = 0;
  uint64_t prev_log_number = 0;
  Builder builder(this, current_);

  {
    LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(file, &reporter, true /*checksum*/,
                       0 /*initial_offset*/);
    Slice record;
    std::string scratch;
    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
      VersionEdit edit;
      s = edit.DecodeFrom(record);
      if (s.ok()) {
        if (edit.has_comparator_ &&
            edit.comparator_ != icmp_.user_comparator()->Name()) {
          s = Status::InvalidArgument(
              edit.comparator_ + " does not match existing comparator ",
              icmp_.user_comparator()->Name());
        }
      }

      if (s.ok()) {
        builder.Apply(&edit);
      }

      if (edit.has_log_number_) {
        log_number = edit.log_number_;
        have_log_number = true;
      }

      if (edit.has_prev_log_number_) {
        prev_log_number = edit.prev_log_number_;
        have_prev_log_number = true;
      }

      if (edit.has_next_file_number_) {
        next_file = edit.next_file_number_;
        have_next_file = true;
      }

      if (edit.has_last_sequence_) {
        last_sequence = edit.last_sequence_;
        have_last_sequence = true;
      }
    }
  }
  delete file;
  file = nullptr;

  if (s.ok()) {
    if (!have_next_file) {
      s = Status::Corruption("no meta-nextfile entry in descriptor");
    } else if (!have_log_number) {
      s = Status::Corruption("no meta-lognumber entry in descriptor");
    } else if (!have_last_sequence) {
      s = Status::Corruption("no last-sequence-number entry in descriptor");
    }
descriptor"); } if (!have_prev_log_number) { prev_log_number = 0; } MarkFileNumberUsed(prev_log_number); MarkFileNumberUsed(log_number); } if (s.ok()) { Version* v = new Version(this); builder.SaveTo(v); // Install recovered version Finalize(v); AppendVersion(v); manifest_file_number_ = next_file; next_file_number_ = next_file + 1; last_sequence_ = last_sequence; log_number_ = log_number; prev_log_number_ = prev_log_number; // See if we can reuse the existing MANIFEST file. if (ReuseManifest(dscname, current)) { // No need to save new manifest } else { *save_manifest = true; } } return s; } bool VersionSet::ReuseManifest(const std::string& dscname, const std::string& dscbase) { if (!options_->reuse_logs) { return false; } FileType manifest_type; uint64_t manifest_number; uint64_t manifest_size; if (!ParseFileName(dscbase, &manifest_number, &manifest_type) || manifest_type != kDescriptorFile || !env_->GetFileSize(dscname, &manifest_size).ok() || // Make new compacted MANIFEST if old one is too big manifest_size >= TargetFileSize(options_)) { return false; } assert(descriptor_file_ == nullptr); assert(descriptor_log_ == nullptr); Status r = env_->NewAppendableFile(dscname, &descriptor_file_); if (!r.ok()) { Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str()); assert(descriptor_file_ == nullptr); return false; } Log(options_->info_log, "Reusing MANIFEST %s\n", dscname.c_str()); descriptor_log_ = new log::Writer(descriptor_file_, manifest_size); manifest_file_number_ = manifest_number; return true; } void VersionSet::MarkFileNumberUsed(uint64_t number) { if (next_file_number_ <= number) { next_file_number_ = number + 1; } } void VersionSet::Finalize(Version* v) { // Precomputed best level for next compaction int best_level = -1; double best_score = -1; for (int level = 0; level < config::kNumLevels - 1; level++) { double score; if (level == 0) { // We treat level-0 specially by bounding the number of files // instead of number of bytes for two reasons: // // (1) With larger write-buffer sizes, it is nice not to do too // many level-0 compactions. // // (2) The files in level-0 are merged on every read and // therefore we wish to avoid too many files when the individual // file size is small (perhaps because of a small write-buffer // setting, or very high compression ratios, or lots of // overwrites/deletions). score = v->files_[level].size() / static_cast<double>(config::kL0_CompactionTrigger); } else { // Compute the ratio of current size to size limit. const uint64_t level_bytes = TotalFileSize(v->files_[level]); score = static_cast<double>(level_bytes) / MaxBytesForLevel(options_, level); } if (score > best_score) { best_level = level; best_score = score; } } v->compaction_level_ = best_level; v->compaction_score_ = best_score; } Status VersionSet::WriteSnapshot(log::Writer* log) { // TODO: Break up into multiple records to reduce memory usage on recovery? 
  // Save metadata
  VersionEdit edit;
  edit.SetComparatorName(icmp_.user_comparator()->Name());

  // Save compaction pointers
  for (int level = 0; level < config::kNumLevels; level++) {
    if (!compact_pointer_[level].empty()) {
      InternalKey key;
      key.DecodeFrom(compact_pointer_[level]);
      edit.SetCompactPointer(level, key);
    }
  }

  // Save files
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = current_->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest);
    }
  }

  std::string record;
  edit.EncodeTo(&record);
  return log->AddRecord(record);
}

int VersionSet::NumLevelFiles(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return current_->files_[level].size();
}

const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
  // Update code if kNumLevels changes
  static_assert(config::kNumLevels == 7, "");
  snprintf(scratch->buffer, sizeof(scratch->buffer),
           "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
           int(current_->files_[1].size()), int(current_->files_[2].size()),
           int(current_->files_[3].size()), int(current_->files_[4].size()),
           int(current_->files_[5].size()), int(current_->files_[6].size()));
  return scratch->buffer;
}

uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
  uint64_t result = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = v->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      if (icmp_.Compare(files[i]->largest, ikey) <= 0) {
        // Entire file is before "ikey", so just add the file size
        result += files[i]->file_size;
      } else if (icmp_.Compare(files[i]->smallest, ikey) > 0) {
        // Entire file is after "ikey", so ignore
        if (level > 0) {
          // Files other than level 0 are sorted by meta->smallest, so
          // no further files in this level will contain data for
          // "ikey".
          break;
        }
      } else {
        // "ikey" falls in the range for this table.  Add the
        // approximate offset of "ikey" within the table.
        Table* tableptr;
        Iterator* iter = table_cache_->NewIterator(
            ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
        if (tableptr != nullptr) {
          result += tableptr->ApproximateOffsetOf(ikey.Encode());
        }
        delete iter;
      }
    }
  }
  return result;
}

void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
  for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
       v = v->next_) {
    for (int level = 0; level < config::kNumLevels; level++) {
      const std::vector<FileMetaData*>& files = v->files_[level];
      for (size_t i = 0; i < files.size(); i++) {
        live->insert(files[i]->number);
      }
    }
  }
}

int64_t VersionSet::NumLevelBytes(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return TotalFileSize(current_->files_[level]);
}

int64_t VersionSet::MaxNextLevelOverlappingBytes() {
  int64_t result = 0;
  std::vector<FileMetaData*> overlaps;
  for (int level = 1; level < config::kNumLevels - 1; level++) {
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      const FileMetaData* f = current_->files_[level][i];
      current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
                                     &overlaps);
      const int64_t sum = TotalFileSize(overlaps);
      if (sum > result) {
        result = sum;
      }
    }
  }
  return result;
}

// Stores the minimal range that covers all entries in inputs in
// *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
                          InternalKey* smallest, InternalKey* largest) {
  assert(!inputs.empty());
  smallest->Clear();
  largest->Clear();
  for (size_t i = 0; i < inputs.size(); i++) {
    FileMetaData* f = inputs[i];
    if (i == 0) {
      *smallest = f->smallest;
      *largest = f->largest;
    } else {
      if (icmp_.Compare(f->smallest, *smallest) < 0) {
        *smallest = f->smallest;
      }
      if (icmp_.Compare(f->largest, *largest) > 0) {
        *largest = f->largest;
      }
    }
  }
}

// Stores the minimal range that covers all entries in inputs1 and inputs2
// in *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                           const std::vector<FileMetaData*>& inputs2,
                           InternalKey* smallest, InternalKey* largest) {
  std::vector<FileMetaData*> all = inputs1;
  all.insert(all.end(), inputs2.begin(), inputs2.end());
  GetRange(all, smallest, largest);
}

Iterator* VersionSet::MakeInputIterator(Compaction* c) {
  ReadOptions options;
  options.verify_checksums = options_->paranoid_checks;
  options.fill_cache = false;

  // Level-0 files have to be merged together.  For other levels,
  // we will make a concatenating iterator per level.
  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
  const int space = (c->level() == 0 ? c->inputs_[0].size() + 1 : 2);
  Iterator** list = new Iterator*[space];
  int num = 0;
  for (int which = 0; which < 2; which++) {
    if (!c->inputs_[which].empty()) {
      if (c->level() + which == 0) {
        const std::vector<FileMetaData*>& files = c->inputs_[which];
        for (size_t i = 0; i < files.size(); i++) {
          list[num++] = table_cache_->NewIterator(options, files[i]->number,
                                                  files[i]->file_size);
        }
      } else {
        // Create concatenating iterator for the files from this level
        list[num++] = NewTwoLevelIterator(
            new Version::LevelFileNumIterator(icmp_, &c->inputs_[which]),
            &GetFileIterator, table_cache_, options);
      }
    }
  }
  assert(num <= space);
  Iterator* result = NewMergingIterator(&icmp_, list, num);
  delete[] list;
  return result;
}

Compaction* VersionSet::PickCompaction() {
  Compaction* c;
  int level;

  // We prefer compactions triggered by too much data in a level over
  // the compactions triggered by seeks.
  const bool size_compaction = (current_->compaction_score_ >= 1);
  const bool seek_compaction = (current_->file_to_compact_ != nullptr);
  if (size_compaction) {
    level = current_->compaction_level_;
    assert(level >= 0);
    assert(level + 1 < config::kNumLevels);
    c = new Compaction(options_, level);

    // Pick the first file that comes after compact_pointer_[level]
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      FileMetaData* f = current_->files_[level][i];
      if (compact_pointer_[level].empty() ||
          icmp_.Compare(f->largest.Encode(), compact_pointer_[level]) > 0) {
        c->inputs_[0].push_back(f);
        break;
      }
    }
    if (c->inputs_[0].empty()) {
      // Wrap-around to the beginning of the key space
      c->inputs_[0].push_back(current_->files_[level][0]);
    }
  } else if (seek_compaction) {
    level = current_->file_to_compact_level_;
    c = new Compaction(options_, level);
    c->inputs_[0].push_back(current_->file_to_compact_);
  } else {
    return nullptr;
  }

  c->input_version_ = current_;
  c->input_version_->Ref();

  // Files in level 0 may overlap each other, so pick up all overlapping ones
  if (level == 0) {
    InternalKey smallest, largest;
    GetRange(c->inputs_[0], &smallest, &largest);
    // Note that the next call will discard the file we placed in
    // c->inputs_[0] earlier and replace it with an overlapping set
    // which will include the picked file.
current_->GetOverlappingInputs(0, &smallest, &largest, &c->inputs_[0]); assert(!c->inputs_[0].empty()); } SetupOtherInputs(c); return c; } // Finds the largest key in a vector of files. Returns true if files is not // empty. bool FindLargestKey(const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& files, InternalKey* largest_key) { if (files.empty()) { return false; } *largest_key = files[0]->largest; for (size_t i = 1; i < files.size(); ++i) { FileMetaData* f = files[i]; if (icmp.Compare(f->largest, *largest_key) > 0) { *largest_key = f->largest; } } return true; } // Finds minimum file b2=(l2, u2) in level_files for which l2 > u1 and // user_key(l2) = user_key(u1) FileMetaData* FindSmallestBoundaryFile( const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& level_files, const InternalKey& largest_key) { const Comparator* user_cmp = icmp.user_comparator(); FileMetaData* smallest_boundary_file = nullptr; for (size_t i = 0; i < level_files.size(); ++i) { FileMetaData* f = level_files[i]; if (icmp.Compare(f->smallest, largest_key) > 0 && user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) == 0) { if (smallest_boundary_file == nullptr || icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) { smallest_boundary_file = f; } } } return smallest_boundary_file; } // Extracts the largest file b1 from |compaction_files| and then searches for a // b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a // file b2 (known as a boundary file) it adds it to |compaction_files| and then // searches again using this new upper bound. // // If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and // user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a // subsequent get operation will yield an incorrect result because it will // return the record from b2 in level i rather than from b1 because it searches // level by level for records matching the supplied user key. // // parameters: // in level_files: List of files to search for boundary files. // in/out compaction_files: List of files to extend by adding boundary files. void AddBoundaryInputs(const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& level_files, std::vector<FileMetaData*>* compaction_files) { InternalKey largest_key; // Quick return if compaction_files is empty. if (!FindLargestKey(icmp, *compaction_files, &largest_key)) { return; } bool continue_searching = true; while (continue_searching) { FileMetaData* smallest_boundary_file = FindSmallestBoundaryFile(icmp, level_files, largest_key); // If a boundary file was found advance largest_key, otherwise we're done. if (smallest_boundary_file != nullptr) { compaction_files->push_back(smallest_boundary_file); largest_key = smallest_boundary_file->largest; } else { continue_searching = false; } } } void VersionSet::SetupOtherInputs(Compaction* c) { const int level = c->level(); InternalKey smallest, largest; AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]); GetRange(c->inputs_[0], &smallest, &largest); current_->GetOverlappingInputs(level + 1, &smallest, &largest, &c->inputs_[1]); // Get entire range covered by compaction InternalKey all_start, all_limit; GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit); // See if we can grow the number of inputs in "level" without // changing the number of "level+1" files we pick up.
if (!c->inputs_[1].empty()) { std::vector<FileMetaData*> expanded0; current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0); AddBoundaryInputs(icmp_, current_->files_[level], &expanded0); const int64_t inputs0_size = TotalFileSize(c->inputs_[0]); const int64_t inputs1_size = TotalFileSize(c->inputs_[1]); const int64_t expanded0_size = TotalFileSize(expanded0); if (expanded0.size() > c->inputs_[0].size() && inputs1_size + expanded0_size < ExpandedCompactionByteSizeLimit(options_)) { InternalKey new_start, new_limit; GetRange(expanded0, &new_start, &new_limit); std::vector<FileMetaData*> expanded1; current_->GetOverlappingInputs(level + 1, &new_start, &new_limit, &expanded1); if (expanded1.size() == c->inputs_[1].size()) { Log(options_->info_log, "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n", level, int(c->inputs_[0].size()), int(c->inputs_[1].size()), long(inputs0_size), long(inputs1_size), int(expanded0.size()), int(expanded1.size()), long(expanded0_size), long(inputs1_size)); smallest = new_start; largest = new_limit; c->inputs_[0] = expanded0; c->inputs_[1] = expanded1; GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit); } } } // Compute the set of grandparent files that overlap this compaction // (parent == level+1; grandparent == level+2) if (level + 2 < config::kNumLevels) { current_->GetOverlappingInputs(level + 2, &all_start, &all_limit, &c->grandparents_); } // Update the place where we will do the next compaction for this level. // We update this immediately instead of waiting for the VersionEdit // to be applied so that if the compaction fails, we will try a different // key range next time. compact_pointer_[level] = largest.Encode().ToString(); c->edit_.SetCompactPointer(level, largest); } Compaction* VersionSet::CompactRange(int level, const InternalKey* begin, const InternalKey* end) { std::vector<FileMetaData*> inputs; current_->GetOverlappingInputs(level, begin, end, &inputs); if (inputs.empty()) { return nullptr; } // Avoid compacting too much in one shot in case the range is large. // But we cannot do this for level-0 since level-0 files can overlap // and we must not pick one file and drop another older file if the // two files overlap. if (level > 0) { const uint64_t limit = MaxFileSizeForLevel(options_, level); uint64_t total = 0; for (size_t i = 0; i < inputs.size(); i++) { uint64_t s = inputs[i]->file_size; total += s; if (total >= limit) { inputs.resize(i + 1); break; } } } Compaction* c = new Compaction(options_, level); c->input_version_ = current_; c->input_version_->Ref(); c->inputs_[0] = inputs; SetupOtherInputs(c); return c; } Compaction::Compaction(const Options* options, int level) : level_(level), max_output_file_size_(MaxFileSizeForLevel(options, level)), input_version_(nullptr), grandparent_index_(0), seen_key_(false), overlapped_bytes_(0) { for (int i = 0; i < config::kNumLevels; i++) { level_ptrs_[i] = 0; } } Compaction::~Compaction() { if (input_version_ != nullptr) { input_version_->Unref(); } } bool Compaction::IsTrivialMove() const { const VersionSet* vset = input_version_->vset_; // Avoid a move if there is lots of overlapping grandparent data. // Otherwise, the move could create a parent file that will require // a very expensive merge later on. 
return (num_input_files(0) == 1 && num_input_files(1) == 0 && TotalFileSize(grandparents_) <= MaxGrandParentOverlapBytes(vset->options_)); } void Compaction::AddInputDeletions(VersionEdit* edit) { for (int which = 0; which < 2; which++) { for (size_t i = 0; i < inputs_[which].size(); i++) { edit->DeleteFile(level_ + which, inputs_[which][i]->number); } } } bool Compaction::IsBaseLevelForKey(const Slice& user_key) { // Maybe use binary search to find right entry instead of linear search? const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator(); for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) { const std::vector<FileMetaData*>& files = input_version_->files_[lvl]; while (level_ptrs_[lvl] < files.size()) { FileMetaData* f = files[level_ptrs_[lvl]]; if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) { // We've advanced far enough if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) { // Key falls in this file's range, so definitely not base level return false; } break; } level_ptrs_[lvl]++; } } return true; } bool Compaction::ShouldStopBefore(const Slice& internal_key) { const VersionSet* vset = input_version_->vset_; // Scan to find earliest grandparent file that contains key. const InternalKeyComparator* icmp = &vset->icmp_; while (grandparent_index_ < grandparents_.size() && icmp->Compare(internal_key, grandparents_[grandparent_index_]->largest.Encode()) > 0) { if (seen_key_) { overlapped_bytes_ += grandparents_[grandparent_index_]->file_size; } grandparent_index_++; } seen_key_ = true; if (overlapped_bytes_ > MaxGrandParentOverlapBytes(vset->options_)) { // Too much overlap for current output; start new output overlapped_bytes_ = 0; return true; } else { return false; } } void Compaction::ReleaseInputs() { if (input_version_ != nullptr) { input_version_->Unref(); input_version_ = nullptr; } } } // namespace leveldb
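A minimal standalone sketch of the boundary-file rule that AddBoundaryInputs enforces above, under simplified assumptions: MiniKey, MiniFile, InternalLess, and AddBoundaryInputsSketch are invented stand-ins for this illustration (the real code operates on FileMetaData through InternalKeyComparator).

#include <cstdint>
#include <string>
#include <vector>

struct MiniKey {
  std::string user_key;
  uint64_t seq;  // larger sequence numbers are newer entries
};

// Internal-key order: ascending user key, then descending sequence number.
static bool InternalLess(const MiniKey& a, const MiniKey& b) {
  if (a.user_key != b.user_key) return a.user_key < b.user_key;
  return a.seq > b.seq;
}

struct MiniFile {
  MiniKey smallest;
  MiniKey largest;
};

// Mirrors the AddBoundaryInputs loop: repeatedly pull in the smallest level
// file that starts strictly after the current upper bound yet shares its user
// key, so older entries for that key cannot be stranded in the level.
static void AddBoundaryInputsSketch(const std::vector<MiniFile>& level_files,
                                    std::vector<MiniFile>* compaction_files) {
  if (compaction_files->empty()) return;
  MiniKey largest = (*compaction_files)[0].largest;
  for (const MiniFile& f : *compaction_files) {
    if (InternalLess(largest, f.largest)) largest = f.largest;
  }
  for (bool found = true; found;) {
    found = false;
    const MiniFile* best = nullptr;
    for (const MiniFile& f : level_files) {
      if (InternalLess(largest, f.smallest) &&
          f.smallest.user_key == largest.user_key &&
          (best == nullptr || InternalLess(f.smallest, best->smallest))) {
        best = &f;
      }
    }
    if (best != nullptr) {
      compaction_files->push_back(*best);
      largest = best->largest;
      found = true;
    }
  }
}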
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/dumpfile.cc
// Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/dumpfile.h" #include <stdio.h> #include "db/dbformat.h" #include "db/filename.h" #include "db/log_reader.h" #include "db/version_edit.h" #include "db/write_batch_internal.h" #include "leveldb/env.h" #include "leveldb/iterator.h" #include "leveldb/options.h" #include "leveldb/status.h" #include "leveldb/table.h" #include "leveldb/write_batch.h" #include "util/logging.h" namespace leveldb { namespace { bool GuessType(const std::string& fname, FileType* type) { size_t pos = fname.rfind('/'); std::string basename; if (pos == std::string::npos) { basename = fname; } else { basename = std::string(fname.data() + pos + 1, fname.size() - pos - 1); } uint64_t ignored; return ParseFileName(basename, &ignored, type); } // Notified when log reader encounters corruption. class CorruptionReporter : public log::Reader::Reporter { public: void Corruption(size_t bytes, const Status& status) override { std::string r = "corruption: "; AppendNumberTo(&r, bytes); r += " bytes; "; r += status.ToString(); r.push_back('\n'); dst_->Append(r); } WritableFile* dst_; }; // Print contents of a log file. (*func)() is called on every record. Status PrintLogContents(Env* env, const std::string& fname, void (*func)(uint64_t, Slice, WritableFile*), WritableFile* dst) { SequentialFile* file; Status s = env->NewSequentialFile(fname, &file); if (!s.ok()) { return s; } CorruptionReporter reporter; reporter.dst_ = dst; log::Reader reader(file, &reporter, true, 0); Slice record; std::string scratch; while (reader.ReadRecord(&record, &scratch)) { (*func)(reader.LastRecordOffset(), record, dst); } delete file; return Status::OK(); } // Called on every item found in a WriteBatch. class WriteBatchItemPrinter : public WriteBatch::Handler { public: void Put(const Slice& key, const Slice& value) override { std::string r = " put '"; AppendEscapedStringTo(&r, key); r += "' '"; AppendEscapedStringTo(&r, value); r += "'\n"; dst_->Append(r); } void Delete(const Slice& key) override { std::string r = " del '"; AppendEscapedStringTo(&r, key); r += "'\n"; dst_->Append(r); } WritableFile* dst_; }; // Called on every log record (each one of which is a WriteBatch) // found in a kLogFile. static void WriteBatchPrinter(uint64_t pos, Slice record, WritableFile* dst) { std::string r = "--- offset "; AppendNumberTo(&r, pos); r += "; "; if (record.size() < 12) { r += "log record length "; AppendNumberTo(&r, record.size()); r += " is too small\n"; dst->Append(r); return; } WriteBatch batch; WriteBatchInternal::SetContents(&batch, record); r += "sequence "; AppendNumberTo(&r, WriteBatchInternal::Sequence(&batch)); r.push_back('\n'); dst->Append(r); WriteBatchItemPrinter batch_item_printer; batch_item_printer.dst_ = dst; Status s = batch.Iterate(&batch_item_printer); if (!s.ok()) { dst->Append(" error: " + s.ToString() + "\n"); } } Status DumpLog(Env* env, const std::string& fname, WritableFile* dst) { return PrintLogContents(env, fname, WriteBatchPrinter, dst); } // Called on every log record (each one of which is a WriteBatch) // found in a kDescriptorFile. 
static void VersionEditPrinter(uint64_t pos, Slice record, WritableFile* dst) { std::string r = "--- offset "; AppendNumberTo(&r, pos); r += "; "; VersionEdit edit; Status s = edit.DecodeFrom(record); if (!s.ok()) { r += s.ToString(); r.push_back('\n'); } else { r += edit.DebugString(); } dst->Append(r); } Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) { return PrintLogContents(env, fname, VersionEditPrinter, dst); } Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) { uint64_t file_size; RandomAccessFile* file = nullptr; Table* table = nullptr; Status s = env->GetFileSize(fname, &file_size); if (s.ok()) { s = env->NewRandomAccessFile(fname, &file); } if (s.ok()) { // We use the default comparator, which may or may not match the // comparator used in this database. However this should not cause // problems since we only use Table operations that do not require // any comparisons. In particular, we do not call Seek or Prev. s = Table::Open(Options(), file, file_size, &table); } if (!s.ok()) { delete table; delete file; return s; } ReadOptions ro; ro.fill_cache = false; Iterator* iter = table->NewIterator(ro); std::string r; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { r.clear(); ParsedInternalKey key; if (!ParseInternalKey(iter->key(), &key)) { r = "badkey '"; AppendEscapedStringTo(&r, iter->key()); r += "' => '"; AppendEscapedStringTo(&r, iter->value()); r += "'\n"; dst->Append(r); } else { r = "'"; AppendEscapedStringTo(&r, key.user_key); r += "' @ "; AppendNumberTo(&r, key.sequence); r += " : "; if (key.type == kTypeDeletion) { r += "del"; } else if (key.type == kTypeValue) { r += "val"; } else { AppendNumberTo(&r, key.type); } r += " => '"; AppendEscapedStringTo(&r, iter->value()); r += "'\n"; dst->Append(r); } } s = iter->status(); if (!s.ok()) { dst->Append("iterator error: " + s.ToString() + "\n"); } delete iter; delete table; delete file; return Status::OK(); } } // namespace Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) { FileType ftype; if (!GuessType(fname, &ftype)) { return Status::InvalidArgument(fname + ": unknown file type"); } switch (ftype) { case kLogFile: return DumpLog(env, fname, dst); case kDescriptorFile: return DumpDescriptor(env, fname, dst); case kTableFile: return DumpTable(env, fname, dst); default: break; } return Status::InvalidArgument(fname + ": not a dump-able file type"); } } // namespace leveldb
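A hedged usage sketch for DumpFile above: capture the dump in memory by implementing WritableFile over a std::string. StringSink and the file path are invented for illustration; the overridden methods follow the WritableFile interface used elsewhere in this tree (Append/Close/Flush/Sync/GetName).

#include <string>

#include "leveldb/dumpfile.h"
#include "leveldb/env.h"
#include "leveldb/status.h"

// Invented helper: collects everything DumpFile emits into a string buffer.
class StringSink : public leveldb::WritableFile {
 public:
  leveldb::Status Append(const leveldb::Slice& data) override {
    contents_.append(data.data(), data.size());
    return leveldb::Status::OK();
  }
  leveldb::Status Close() override { return leveldb::Status::OK(); }
  leveldb::Status Flush() override { return leveldb::Status::OK(); }
  leveldb::Status Sync() override { return leveldb::Status::OK(); }
  std::string GetName() const override { return "[string]"; }
  const std::string& contents() const { return contents_; }

 private:
  std::string contents_;
};

// Possible call site (the path is a placeholder):
//   StringSink sink;
//   leveldb::Status s =
//       leveldb::DumpFile(leveldb::Env::Default(), "/path/to/000005.ldb", &sink);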
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/log_reader.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_DB_LOG_READER_H_ #define STORAGE_LEVELDB_DB_LOG_READER_H_ #include <stdint.h> #include "db/log_format.h" #include "leveldb/slice.h" #include "leveldb/status.h" namespace leveldb { class SequentialFile; namespace log { class Reader { public: // Interface for reporting errors. class Reporter { public: virtual ~Reporter(); // Some corruption was detected. "size" is the approximate number // of bytes dropped due to the corruption. virtual void Corruption(size_t bytes, const Status& status) = 0; }; // Create a reader that will return log records from "*file". // "*file" must remain live while this Reader is in use. // // If "reporter" is non-null, it is notified whenever some data is // dropped due to a detected corruption. "*reporter" must remain // live while this Reader is in use. // // If "checksum" is true, verify checksums if available. // // The Reader will start reading at the first record located at physical // position >= initial_offset within the file. Reader(SequentialFile* file, Reporter* reporter, bool checksum, uint64_t initial_offset); Reader(const Reader&) = delete; Reader& operator=(const Reader&) = delete; ~Reader(); // Read the next record into *record. Returns true if read // successfully, false if we hit end of the input. May use // "*scratch" as temporary storage. The contents filled in *record // will only be valid until the next mutating operation on this // reader or the next mutation to *scratch. bool ReadRecord(Slice* record, std::string* scratch); // Returns the physical offset of the last record returned by ReadRecord. // // Undefined before the first call to ReadRecord. uint64_t LastRecordOffset(); private: // Extend record types with the following special values enum { kEof = kMaxRecordType + 1, // Returned whenever we find an invalid physical record. // Currently there are three situations in which this happens: // * The record has an invalid CRC (ReadPhysicalRecord reports a drop) // * The record is a 0-length record (No drop is reported) // * The record is below constructor's initial_offset (No drop is reported) kBadRecord = kMaxRecordType + 2 }; // Skips all blocks that are completely before "initial_offset_". // // Returns true on success. Handles reporting. bool SkipToInitialBlock(); // Return type, or one of the preceding special values unsigned int ReadPhysicalRecord(Slice* result); // Reports dropped bytes to the reporter. // buffer_ must be updated to remove the dropped bytes prior to invocation. void ReportCorruption(uint64_t bytes, const char* reason); void ReportDrop(uint64_t bytes, const Status& reason); SequentialFile* const file_; Reporter* const reporter_; bool const checksum_; char* const backing_store_; Slice buffer_; bool eof_; // Last Read() indicated EOF by returning < kBlockSize // Offset of the last record returned by ReadRecord. uint64_t last_record_offset_; // Offset of the first location past the end of buffer_. uint64_t end_of_buffer_offset_; // Offset at which to start looking for the first record to return uint64_t const initial_offset_; // True if we are resynchronizing after a seek (initial_offset_ > 0). 
In // particular, a run of kMiddleType and kLastType records can be silently // skipped in this mode bool resyncing_; }; } // namespace log } // namespace leveldb #endif // STORAGE_LEVELDB_DB_LOG_READER_H_
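A hedged usage sketch of the Reader interface declared above: count every record in a write-ahead log while tallying bytes dropped to corruption. CountLogRecords and CountingReporter are invented names; error handling is kept minimal.

#include <string>

#include "db/log_reader.h"
#include "leveldb/env.h"

namespace {
struct CountingReporter : public leveldb::log::Reader::Reporter {
  size_t dropped = 0;
  void Corruption(size_t bytes, const leveldb::Status&) override {
    dropped += bytes;  // bytes skipped because a physical record was bad
  }
};
}  // namespace

bool CountLogRecords(const std::string& fname, int* count) {
  leveldb::SequentialFile* file;
  if (!leveldb::Env::Default()->NewSequentialFile(fname, &file).ok()) {
    return false;
  }
  CountingReporter reporter;
  leveldb::log::Reader reader(file, &reporter, /*checksum=*/true,
                              /*initial_offset=*/0);
  leveldb::Slice record;
  std::string scratch;
  *count = 0;
  while (reader.ReadRecord(&record, &scratch)) {
    ++*count;
  }
  delete file;
  return true;
}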
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/autocompact_test.cc
// Copyright (c) 2013 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/db_impl.h" #include "leveldb/cache.h" #include "leveldb/db.h" #include "util/testharness.h" #include "util/testutil.h" namespace leveldb { class AutoCompactTest { public: AutoCompactTest() { dbname_ = test::TmpDir() + "/autocompact_test"; tiny_cache_ = NewLRUCache(100); options_.block_cache = tiny_cache_; DestroyDB(dbname_, options_); options_.create_if_missing = true; options_.compression = kNoCompression; ASSERT_OK(DB::Open(options_, dbname_, &db_)); } ~AutoCompactTest() { delete db_; DestroyDB(dbname_, Options()); delete tiny_cache_; } std::string Key(int i) { char buf[100]; snprintf(buf, sizeof(buf), "key%06d", i); return std::string(buf); } uint64_t Size(const Slice& start, const Slice& limit) { Range r(start, limit); uint64_t size; db_->GetApproximateSizes(&r, 1, &size); return size; } void DoReads(int n); private: std::string dbname_; Cache* tiny_cache_; Options options_; DB* db_; }; static const int kValueSize = 200 * 1024; static const int kTotalSize = 100 * 1024 * 1024; static const int kCount = kTotalSize / kValueSize; // Read through the first n keys repeatedly and check that they get // compacted (verified by checking the size of the key space). void AutoCompactTest::DoReads(int n) { std::string value(kValueSize, 'x'); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); // Fill database for (int i = 0; i < kCount; i++) { ASSERT_OK(db_->Put(WriteOptions(), Key(i), value)); } ASSERT_OK(dbi->TEST_CompactMemTable()); // Delete everything for (int i = 0; i < kCount; i++) { ASSERT_OK(db_->Delete(WriteOptions(), Key(i))); } ASSERT_OK(dbi->TEST_CompactMemTable()); // Get initial measurement of the space we will be reading. const int64_t initial_size = Size(Key(0), Key(n)); const int64_t initial_other_size = Size(Key(n), Key(kCount)); // Read until size drops significantly. std::string limit_key = Key(n); for (int read = 0; true; read++) { ASSERT_LT(read, 100) << "Taking too long to compact"; Iterator* iter = db_->NewIterator(ReadOptions()); for (iter->SeekToFirst(); iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) { // Drop data } delete iter; // Wait a little bit to allow any triggered compactions to complete. Env::Default()->SleepForMicroseconds(1000000); uint64_t size = Size(Key(0), Key(n)); fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1, size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0); if (size <= initial_size / 10) { break; } } // Verify that the size of the key space not touched by the reads // is pretty much unchanged. const int64_t final_other_size = Size(Key(n), Key(kCount)); ASSERT_LE(final_other_size, initial_other_size + 1048576); ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576); } TEST(AutoCompactTest, ReadAll) { DoReads(kCount); } TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
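The Size() helper in the test above is a thin wrapper over DB::GetApproximateSizes; a hedged stand-alone equivalent (ApproxRangeSize is an invented name):

#include <cstdint>
#include <string>

#include "leveldb/db.h"

// Approximate on-disk bytes used by keys in [start, limit), as reported by
// the version's file metadata rather than by scanning the keys themselves.
uint64_t ApproxRangeSize(leveldb::DB* db, const std::string& start,
                         const std::string& limit) {
  leveldb::Range r(start, limit);
  uint64_t size = 0;
  db->GetApproximateSizes(&r, 1, &size);
  return size;
}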
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/write_batch_internal.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_ #define STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_ #include "db/dbformat.h" #include "leveldb/write_batch.h" namespace leveldb { class MemTable; // WriteBatchInternal provides static methods for manipulating a // WriteBatch that we don't want in the public WriteBatch interface. class WriteBatchInternal { public: // Return the number of entries in the batch. static int Count(const WriteBatch* batch); // Set the count for the number of entries in the batch. static void SetCount(WriteBatch* batch, int n); // Return the sequence number for the start of this batch. static SequenceNumber Sequence(const WriteBatch* batch); // Store the specified number as the sequence number for the start of // this batch. static void SetSequence(WriteBatch* batch, SequenceNumber seq); static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); } static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); } static void SetContents(WriteBatch* batch, const Slice& contents); static Status InsertInto(const WriteBatch* batch, MemTable* memtable); static void Append(WriteBatch* dst, const WriteBatch* src); }; } // namespace leveldb #endif // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
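A hedged illustration of the representation these helpers manipulate: rep_ begins with a 12-byte header, an 8-byte little-endian sequence number followed by a 4-byte record count, which is why log records shorter than 12 bytes are rejected elsewhere in this tree. DecodeBatchHeader is an invented stand-alone decoder and assumes a little-endian host.

#include <cstdint>
#include <cstring>
#include <string>

struct BatchHeader {
  uint64_t sequence;  // sequence number assigned to the first record
  uint32_t count;     // number of Put/Delete records that follow
};

bool DecodeBatchHeader(const std::string& rep, BatchHeader* h) {
  if (rep.size() < 12) return false;  // header missing or truncated
  std::memcpy(&h->sequence, rep.data(), 8);   // assumes a little-endian host
  std::memcpy(&h->count, rep.data() + 8, 4);  // count follows the sequence
  return true;
}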
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/db_iter.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/db_iter.h" #include "db/db_impl.h" #include "db/dbformat.h" #include "db/filename.h" #include "leveldb/env.h" #include "leveldb/iterator.h" #include "port/port.h" #include "util/logging.h" #include "util/mutexlock.h" #include "util/random.h" namespace leveldb { #if 0 static void DumpInternalIter(Iterator* iter) { for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedInternalKey k; if (!ParseInternalKey(iter->key(), &k)) { fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str()); } else { fprintf(stderr, "@ '%s'\n", k.DebugString().c_str()); } } } #endif namespace { // Memtables and sstables that make the DB representation contain // (userkey,seq,type) => uservalue entries. DBIter // combines multiple entries for the same userkey found in the DB // representation into a single entry while accounting for sequence // numbers, deletion markers, overwrites, etc. class DBIter : public Iterator { public: // Which direction is the iterator currently moving? // (1) When moving forward, the internal iterator is positioned at // the exact entry that yields this->key(), this->value() // (2) When moving backwards, the internal iterator is positioned // just before all entries whose user key == this->key(). enum Direction { kForward, kReverse }; DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s, uint32_t seed) : db_(db), user_comparator_(cmp), iter_(iter), sequence_(s), direction_(kForward), valid_(false), rnd_(seed), bytes_until_read_sampling_(RandomCompactionPeriod()) {} DBIter(const DBIter&) = delete; DBIter& operator=(const DBIter&) = delete; ~DBIter() override { delete iter_; } bool Valid() const override { return valid_; } Slice key() const override { assert(valid_); return (direction_ == kForward) ? ExtractUserKey(iter_->key()) : saved_key_; } Slice value() const override { assert(valid_); return (direction_ == kForward) ? iter_->value() : saved_value_; } Status status() const override { if (status_.ok()) { return iter_->status(); } else { return status_; } } void Next() override; void Prev() override; void Seek(const Slice& target) override; void SeekToFirst() override; void SeekToLast() override; private: void FindNextUserEntry(bool skipping, std::string* skip); void FindPrevUserEntry(); bool ParseKey(ParsedInternalKey* key); inline void SaveKey(const Slice& k, std::string* dst) { dst->assign(k.data(), k.size()); } inline void ClearSavedValue() { if (saved_value_.capacity() > 1048576) { std::string empty; swap(empty, saved_value_); } else { saved_value_.clear(); } } // Picks the number of bytes that can be read until a compaction is scheduled. 
size_t RandomCompactionPeriod() { return rnd_.Uniform(2 * config::kReadBytesPeriod); } DBImpl* db_; const Comparator* const user_comparator_; Iterator* const iter_; SequenceNumber const sequence_; Status status_; std::string saved_key_; // == current key when direction_==kReverse std::string saved_value_; // == current raw value when direction_==kReverse Direction direction_; bool valid_; Random rnd_; size_t bytes_until_read_sampling_; }; inline bool DBIter::ParseKey(ParsedInternalKey* ikey) { Slice k = iter_->key(); size_t bytes_read = k.size() + iter_->value().size(); while (bytes_until_read_sampling_ < bytes_read) { bytes_until_read_sampling_ += RandomCompactionPeriod(); db_->RecordReadSample(k); } assert(bytes_until_read_sampling_ >= bytes_read); bytes_until_read_sampling_ -= bytes_read; if (!ParseInternalKey(k, ikey)) { status_ = Status::Corruption("corrupted internal key in DBIter"); return false; } else { return true; } } void DBIter::Next() { assert(valid_); if (direction_ == kReverse) { // Switch directions? direction_ = kForward; // iter_ is pointing just before the entries for this->key(), // so advance into the range of entries for this->key() and then // use the normal skipping code below. if (!iter_->Valid()) { iter_->SeekToFirst(); } else { iter_->Next(); } if (!iter_->Valid()) { valid_ = false; saved_key_.clear(); return; } // saved_key_ already contains the key to skip past. } else { // Store in saved_key_ the current key so we skip it below. SaveKey(ExtractUserKey(iter_->key()), &saved_key_); // iter_ is pointing to current key. We can now safely move to the next to // avoid checking current key. iter_->Next(); if (!iter_->Valid()) { valid_ = false; saved_key_.clear(); return; } } FindNextUserEntry(true, &saved_key_); } void DBIter::FindNextUserEntry(bool skipping, std::string* skip) { // Loop until we hit an acceptable entry to yield assert(iter_->Valid()); assert(direction_ == kForward); do { ParsedInternalKey ikey; if (ParseKey(&ikey) && ikey.sequence <= sequence_) { switch (ikey.type) { case kTypeDeletion: // Arrange to skip all upcoming entries for this key since // they are hidden by this deletion. SaveKey(ikey.user_key, skip); skipping = true; break; case kTypeValue: if (skipping && user_comparator_->Compare(ikey.user_key, *skip) <= 0) { // Entry hidden } else { valid_ = true; saved_key_.clear(); return; } break; } } iter_->Next(); } while (iter_->Valid()); saved_key_.clear(); valid_ = false; } void DBIter::Prev() { assert(valid_); if (direction_ == kForward) { // Switch directions? // iter_ is pointing at the current entry. Scan backwards until // the key changes so we can use the normal reverse scanning code. 
assert(iter_->Valid()); // Otherwise valid_ would have been false SaveKey(ExtractUserKey(iter_->key()), &saved_key_); while (true) { iter_->Prev(); if (!iter_->Valid()) { valid_ = false; saved_key_.clear(); ClearSavedValue(); return; } if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) < 0) { break; } } direction_ = kReverse; } FindPrevUserEntry(); } void DBIter::FindPrevUserEntry() { assert(direction_ == kReverse); ValueType value_type = kTypeDeletion; if (iter_->Valid()) { do { ParsedInternalKey ikey; if (ParseKey(&ikey) && ikey.sequence <= sequence_) { if ((value_type != kTypeDeletion) && user_comparator_->Compare(ikey.user_key, saved_key_) < 0) { // We encountered a non-deleted value in entries for previous keys, break; } value_type = ikey.type; if (value_type == kTypeDeletion) { saved_key_.clear(); ClearSavedValue(); } else { Slice raw_value = iter_->value(); if (saved_value_.capacity() > raw_value.size() + 1048576) { std::string empty; swap(empty, saved_value_); } SaveKey(ExtractUserKey(iter_->key()), &saved_key_); saved_value_.assign(raw_value.data(), raw_value.size()); } } iter_->Prev(); } while (iter_->Valid()); } if (value_type == kTypeDeletion) { // End valid_ = false; saved_key_.clear(); ClearSavedValue(); direction_ = kForward; } else { valid_ = true; } } void DBIter::Seek(const Slice& target) { direction_ = kForward; ClearSavedValue(); saved_key_.clear(); AppendInternalKey(&saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek)); iter_->Seek(saved_key_); if (iter_->Valid()) { FindNextUserEntry(false, &saved_key_ /* temporary storage */); } else { valid_ = false; } } void DBIter::SeekToFirst() { direction_ = kForward; ClearSavedValue(); iter_->SeekToFirst(); if (iter_->Valid()) { FindNextUserEntry(false, &saved_key_ /* temporary storage */); } else { valid_ = false; } } void DBIter::SeekToLast() { direction_ = kReverse; ClearSavedValue(); iter_->SeekToLast(); FindPrevUserEntry(); } } // anonymous namespace Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator, Iterator* internal_iter, SequenceNumber sequence, uint32_t seed) { return new DBIter(db, user_key_comparator, internal_iter, sequence, seed); } } // namespace leveldb
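DBIter hides every entry whose sequence number exceeds sequence_, which is what makes snapshot reads stable. A hedged sketch through the public API (the key and value are placeholders; db is assumed open):

#include <cstdio>

#include "leveldb/db.h"

void IterateAtSnapshot(leveldb::DB* db) {
  const leveldb::Snapshot* snap = db->GetSnapshot();
  // This write happens after the snapshot, so the iterator below skips it.
  db->Put(leveldb::WriteOptions(), "later-key", "not visible below");
  leveldb::ReadOptions ro;
  ro.snapshot = snap;
  leveldb::Iterator* it = db->NewIterator(ro);
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    std::fprintf(stderr, "%s\n", it->key().ToString().c_str());
  }
  delete it;
  db->ReleaseSnapshot(snap);
}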
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/repair.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // We recover the contents of the descriptor from the other files we find. // (1) Any log files are first converted to tables // (2) We scan every table to compute // (a) smallest/largest for the table // (b) largest sequence number in the table // (3) We generate descriptor contents: // - log number is set to zero // - next-file-number is set to 1 + largest file number we found // - last-sequence-number is set to largest sequence# found across // all tables (see 2b) // - compaction pointers are cleared // - every table file is added at level 0 // // Possible optimization 1: // (a) Compute total size and use to pick appropriate max-level M // (b) Sort tables by largest sequence# in the table // (c) For each table: if it overlaps earlier table, place in level-0, // else place in level-M. // Possible optimization 2: // Store per-table metadata (smallest, largest, largest-seq#, ...) // in the table's meta section to speed up ScanTable. #include "db/builder.h" #include "db/db_impl.h" #include "db/dbformat.h" #include "db/filename.h" #include "db/log_reader.h" #include "db/log_writer.h" #include "db/memtable.h" #include "db/table_cache.h" #include "db/version_edit.h" #include "db/write_batch_internal.h" #include "leveldb/comparator.h" #include "leveldb/db.h" #include "leveldb/env.h" namespace leveldb { namespace { class Repairer { public: Repairer(const std::string& dbname, const Options& options) : dbname_(dbname), env_(options.env), icmp_(options.comparator), ipolicy_(options.filter_policy), options_(SanitizeOptions(dbname, &icmp_, &ipolicy_, options)), owns_info_log_(options_.info_log != options.info_log), owns_cache_(options_.block_cache != options.block_cache), next_file_number_(1) { // TableCache can be small since we expect each table to be opened once. table_cache_ = new TableCache(dbname_, options_, 10); } ~Repairer() { delete table_cache_; if (owns_info_log_) { delete options_.info_log; } if (owns_cache_) { delete options_.block_cache; } } Status Run() { Status status = FindFiles(); if (status.ok()) { ConvertLogFilesToTables(); ExtractMetaData(); status = WriteDescriptor(); } if (status.ok()) { unsigned long long bytes = 0; for (size_t i = 0; i < tables_.size(); i++) { bytes += tables_[i].meta.file_size; } Log(options_.info_log, "**** Repaired leveldb %s; " "recovered %d files; %llu bytes. " "Some data may have been lost. 
" "****", dbname_.c_str(), static_cast<int>(tables_.size()), bytes); } return status; } private: struct TableInfo { FileMetaData meta; SequenceNumber max_sequence; }; Status FindFiles() { std::vector<std::string> filenames; Status status = env_->GetChildren(dbname_, &filenames); if (!status.ok()) { return status; } if (filenames.empty()) { return Status::IOError(dbname_, "repair found no files"); } uint64_t number; FileType type; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type)) { if (type == kDescriptorFile) { manifests_.push_back(filenames[i]); } else { if (number + 1 > next_file_number_) { next_file_number_ = number + 1; } if (type == kLogFile) { logs_.push_back(number); } else if (type == kTableFile) { table_numbers_.push_back(number); } else { // Ignore other files } } } } return status; } void ConvertLogFilesToTables() { for (size_t i = 0; i < logs_.size(); i++) { std::string logname = LogFileName(dbname_, logs_[i]); Status status = ConvertLogToTable(logs_[i]); if (!status.ok()) { Log(options_.info_log, "Log #%llu: ignoring conversion error: %s", (unsigned long long)logs_[i], status.ToString().c_str()); } ArchiveFile(logname); } } Status ConvertLogToTable(uint64_t log) { struct LogReporter : public log::Reader::Reporter { Env* env; Logger* info_log; uint64_t lognum; void Corruption(size_t bytes, const Status& s) override { // We print error messages for corruption, but continue repairing. Log(info_log, "Log #%llu: dropping %d bytes; %s", (unsigned long long)lognum, static_cast<int>(bytes), s.ToString().c_str()); } }; // Open the log file std::string logname = LogFileName(dbname_, log); SequentialFile* lfile; Status status = env_->NewSequentialFile(logname, &lfile); if (!status.ok()) { return status; } // Create the log reader. LogReporter reporter; reporter.env = env_; reporter.info_log = options_.info_log; reporter.lognum = log; // We intentionally make log::Reader do checksumming so that // corruptions cause entire commits to be skipped instead of // propagating bad information (like overly large sequence // numbers). log::Reader reader(lfile, &reporter, true /*checksum*/, 0 /*initial_offset*/); // Read all the records and add to a memtable std::string scratch; Slice record; WriteBatch batch; MemTable* mem = new MemTable(icmp_); mem->Ref(); int counter = 0; while (reader.ReadRecord(&record, &scratch)) { if (record.size() < 12) { reporter.Corruption(record.size(), Status::Corruption("log record too small", logname)); continue; } WriteBatchInternal::SetContents(&batch, record); status = WriteBatchInternal::InsertInto(&batch, mem); if (status.ok()) { counter += WriteBatchInternal::Count(&batch); } else { Log(options_.info_log, "Log #%llu: ignoring %s", (unsigned long long)log, status.ToString().c_str()); status = Status::OK(); // Keep going with rest of file } } delete lfile; // Do not record a version edit for this conversion to a Table // since ExtractMetaData() will also generate edits. 
FileMetaData meta; meta.number = next_file_number_++; Iterator* iter = mem->NewIterator(); status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta); delete iter; mem->Unref(); mem = nullptr; if (status.ok()) { if (meta.file_size > 0) { table_numbers_.push_back(meta.number); } } Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s", (unsigned long long)log, counter, (unsigned long long)meta.number, status.ToString().c_str()); return status; } void ExtractMetaData() { for (size_t i = 0; i < table_numbers_.size(); i++) { ScanTable(table_numbers_[i]); } } Iterator* NewTableIterator(const FileMetaData& meta) { // Same as compaction iterators: if paranoid_checks are on, turn // on checksum verification. ReadOptions r; r.verify_checksums = options_.paranoid_checks; return table_cache_->NewIterator(r, meta.number, meta.file_size); } void ScanTable(uint64_t number) { TableInfo t; t.meta.number = number; std::string fname = TableFileName(dbname_, number); Status status = env_->GetFileSize(fname, &t.meta.file_size); if (!status.ok()) { // Try alternate file name. fname = SSTTableFileName(dbname_, number); Status s2 = env_->GetFileSize(fname, &t.meta.file_size); if (s2.ok()) { status = Status::OK(); } } if (!status.ok()) { ArchiveFile(TableFileName(dbname_, number)); ArchiveFile(SSTTableFileName(dbname_, number)); Log(options_.info_log, "Table #%llu: dropped: %s", (unsigned long long)t.meta.number, status.ToString().c_str()); return; } // Extract metadata by scanning through table. int counter = 0; Iterator* iter = NewTableIterator(t.meta); bool empty = true; ParsedInternalKey parsed; t.max_sequence = 0; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { Slice key = iter->key(); if (!ParseInternalKey(key, &parsed)) { Log(options_.info_log, "Table #%llu: unparsable key %s", (unsigned long long)t.meta.number, EscapeString(key).c_str()); continue; } counter++; if (empty) { empty = false; t.meta.smallest.DecodeFrom(key); } t.meta.largest.DecodeFrom(key); if (parsed.sequence > t.max_sequence) { t.max_sequence = parsed.sequence; } } if (!iter->status().ok()) { status = iter->status(); } delete iter; Log(options_.info_log, "Table #%llu: %d entries %s", (unsigned long long)t.meta.number, counter, status.ToString().c_str()); if (status.ok()) { tables_.push_back(t); } else { RepairTable(fname, t); // RepairTable archives input file. } } void RepairTable(const std::string& src, TableInfo t) { // We will copy src contents to a new table and then rename the // new table over the source. // Create builder. std::string copy = TableFileName(dbname_, next_file_number_++); WritableFile* file; Status s = env_->NewWritableFile(copy, &file); if (!s.ok()) { return; } TableBuilder* builder = new TableBuilder(options_, file); // Copy data. 
Iterator* iter = NewTableIterator(t.meta); int counter = 0; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { builder->Add(iter->key(), iter->value()); counter++; } delete iter; ArchiveFile(src); if (counter == 0) { builder->Abandon(); // Nothing to save } else { s = builder->Finish(); if (s.ok()) { t.meta.file_size = builder->FileSize(); } } delete builder; builder = nullptr; if (s.ok()) { s = file->Close(); } delete file; file = nullptr; if (counter > 0 && s.ok()) { std::string orig = TableFileName(dbname_, t.meta.number); s = env_->RenameFile(copy, orig); if (s.ok()) { Log(options_.info_log, "Table #%llu: %d entries repaired", (unsigned long long)t.meta.number, counter); tables_.push_back(t); } } if (!s.ok()) { env_->DeleteFile(copy); } } Status WriteDescriptor() { std::string tmp = TempFileName(dbname_, 1); WritableFile* file; Status status = env_->NewWritableFile(tmp, &file); if (!status.ok()) { return status; } SequenceNumber max_sequence = 0; for (size_t i = 0; i < tables_.size(); i++) { if (max_sequence < tables_[i].max_sequence) { max_sequence = tables_[i].max_sequence; } } edit_.SetComparatorName(icmp_.user_comparator()->Name()); edit_.SetLogNumber(0); edit_.SetNextFile(next_file_number_); edit_.SetLastSequence(max_sequence); for (size_t i = 0; i < tables_.size(); i++) { // TODO(opt): separate out into multiple levels const TableInfo& t = tables_[i]; edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest, t.meta.largest); } // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str()); { log::Writer log(file); std::string record; edit_.EncodeTo(&record); status = log.AddRecord(record); } if (status.ok()) { status = file->Close(); } delete file; file = nullptr; if (!status.ok()) { env_->DeleteFile(tmp); } else { // Discard older manifests for (size_t i = 0; i < manifests_.size(); i++) { ArchiveFile(dbname_ + "/" + manifests_[i]); } // Install new manifest status = env_->RenameFile(tmp, DescriptorFileName(dbname_, 1)); if (status.ok()) { status = SetCurrentFile(env_, dbname_, 1); } else { env_->DeleteFile(tmp); } } return status; } void ArchiveFile(const std::string& fname) { // Move into another directory. E.g., for // dir/foo // rename to // dir/lost/foo const char* slash = strrchr(fname.c_str(), '/'); std::string new_dir; if (slash != nullptr) { new_dir.assign(fname.data(), slash - fname.data()); } new_dir.append("/lost"); env_->CreateDir(new_dir); // Ignore error std::string new_file = new_dir; new_file.append("/"); new_file.append((slash == nullptr) ? fname.c_str() : slash + 1); Status s = env_->RenameFile(fname, new_file); Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(), s.ToString().c_str()); } const std::string dbname_; Env* const env_; InternalKeyComparator const icmp_; InternalFilterPolicy const ipolicy_; const Options options_; bool owns_info_log_; bool owns_cache_; TableCache* table_cache_; VersionEdit edit_; std::vector<std::string> manifests_; std::vector<uint64_t> table_numbers_; std::vector<uint64_t> logs_; std::vector<TableInfo> tables_; uint64_t next_file_number_; }; } // namespace Status RepairDB(const std::string& dbname, const Options& options) { Repairer repairer(dbname, options); return repairer.Run(); } } // namespace leveldb
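A hedged usage sketch for the repairer above: RepairDB is the public entry point declared in leveldb/db.h. The path is a placeholder, the options should match those the database was created with (comparator, filter policy), and RepairThenOpen is an invented name.

#include <cstdio>
#include <string>

#include "leveldb/db.h"
#include "leveldb/options.h"

int RepairThenOpen(const std::string& dbname) {
  leveldb::Options options;
  leveldb::Status s = leveldb::RepairDB(dbname, options);
  if (!s.ok()) {
    std::fprintf(stderr, "repair failed: %s\n", s.ToString().c_str());
    return 1;
  }
  leveldb::DB* db = nullptr;
  s = leveldb::DB::Open(options, dbname, &db);
  if (s.ok()) delete db;
  return s.ok() ? 0 : 1;
}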
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/write_batch_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "leveldb/db.h" #include "db/memtable.h" #include "db/write_batch_internal.h" #include "leveldb/env.h" #include "util/logging.h" #include "util/testharness.h" namespace leveldb { static std::string PrintContents(WriteBatch* b) { InternalKeyComparator cmp(BytewiseComparator()); MemTable* mem = new MemTable(cmp); mem->Ref(); std::string state; Status s = WriteBatchInternal::InsertInto(b, mem); int count = 0; Iterator* iter = mem->NewIterator(); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedInternalKey ikey; ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey)); switch (ikey.type) { case kTypeValue: state.append("Put("); state.append(ikey.user_key.ToString()); state.append(", "); state.append(iter->value().ToString()); state.append(")"); count++; break; case kTypeDeletion: state.append("Delete("); state.append(ikey.user_key.ToString()); state.append(")"); count++; break; } state.append("@"); state.append(NumberToString(ikey.sequence)); } delete iter; if (!s.ok()) { state.append("ParseError()"); } else if (count != WriteBatchInternal::Count(b)) { state.append("CountMismatch()"); } mem->Unref(); return state; } class WriteBatchTest {}; TEST(WriteBatchTest, Empty) { WriteBatch batch; ASSERT_EQ("", PrintContents(&batch)); ASSERT_EQ(0, WriteBatchInternal::Count(&batch)); } TEST(WriteBatchTest, Multiple) { WriteBatch batch; batch.Put(Slice("foo"), Slice("bar")); batch.Delete(Slice("box")); batch.Put(Slice("baz"), Slice("boo")); WriteBatchInternal::SetSequence(&batch, 100); ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch)); ASSERT_EQ(3, WriteBatchInternal::Count(&batch)); ASSERT_EQ( "Put(baz, boo)@102" "Delete(box)@101" "Put(foo, bar)@100", PrintContents(&batch)); } TEST(WriteBatchTest, Corruption) { WriteBatch batch; batch.Put(Slice("foo"), Slice("bar")); batch.Delete(Slice("box")); WriteBatchInternal::SetSequence(&batch, 200); Slice contents = WriteBatchInternal::Contents(&batch); WriteBatchInternal::SetContents(&batch, Slice(contents.data(), contents.size() - 1)); ASSERT_EQ( "Put(foo, bar)@200" "ParseError()", PrintContents(&batch)); } TEST(WriteBatchTest, Append) { WriteBatch b1, b2; WriteBatchInternal::SetSequence(&b1, 200); WriteBatchInternal::SetSequence(&b2, 300); b1.Append(b2); ASSERT_EQ("", PrintContents(&b1)); b2.Put("a", "va"); b1.Append(b2); ASSERT_EQ("Put(a, va)@200", PrintContents(&b1)); b2.Clear(); b2.Put("b", "vb"); b1.Append(b2); ASSERT_EQ( "Put(a, va)@200" "Put(b, vb)@201", PrintContents(&b1)); b2.Delete("foo"); b1.Append(b2); ASSERT_EQ( "Put(a, va)@200" "Put(b, vb)@202" "Put(b, vb)@201" "Delete(foo)@203", PrintContents(&b1)); } TEST(WriteBatchTest, ApproximateSize) { WriteBatch batch; size_t empty_size = batch.ApproximateSize(); batch.Put(Slice("foo"), Slice("bar")); size_t one_key_size = batch.ApproximateSize(); ASSERT_LT(empty_size, one_key_size); batch.Put(Slice("baz"), Slice("boo")); size_t two_keys_size = batch.ApproximateSize(); ASSERT_LT(one_key_size, two_keys_size); batch.Delete(Slice("box")); size_t post_delete_size = batch.ApproximateSize(); ASSERT_LT(two_keys_size, post_delete_size); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
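A hedged public-API counterpart to the InsertInto-based checks above: a WriteBatch applies all of its operations atomically through DB::Write. AtomicRename is an invented example helper.

#include "leveldb/db.h"
#include "leveldb/write_batch.h"

// Move a value from one key to another in a single atomic write: either both
// the Delete and the Put land, or neither does.
leveldb::Status AtomicRename(leveldb::DB* db, const leveldb::Slice& from,
                             const leveldb::Slice& to,
                             const leveldb::Slice& value) {
  leveldb::WriteBatch batch;
  batch.Delete(from);
  batch.Put(to, value);
  return db->Write(leveldb::WriteOptions(), &batch);
}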
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/filename.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // File names used by DB code #ifndef STORAGE_LEVELDB_DB_FILENAME_H_ #define STORAGE_LEVELDB_DB_FILENAME_H_ #include <stdint.h> #include <string> #include "leveldb/slice.h" #include "leveldb/status.h" #include "port/port.h" namespace leveldb { class Env; enum FileType { kLogFile, kDBLockFile, kTableFile, kDescriptorFile, kCurrentFile, kTempFile, kInfoLogFile // Either the current one, or an old one }; // Return the name of the log file with the specified number // in the db named by "dbname". The result will be prefixed with // "dbname". std::string LogFileName(const std::string& dbname, uint64_t number); // Return the name of the sstable with the specified number // in the db named by "dbname". The result will be prefixed with // "dbname". std::string TableFileName(const std::string& dbname, uint64_t number); // Return the legacy file name for an sstable with the specified number // in the db named by "dbname". The result will be prefixed with // "dbname". std::string SSTTableFileName(const std::string& dbname, uint64_t number); // Return the name of the descriptor file for the db named by // "dbname" and the specified incarnation number. The result will be // prefixed with "dbname". std::string DescriptorFileName(const std::string& dbname, uint64_t number); // Return the name of the current file. This file contains the name // of the current manifest file. The result will be prefixed with // "dbname". std::string CurrentFileName(const std::string& dbname); // Return the name of the lock file for the db named by // "dbname". The result will be prefixed with "dbname". std::string LockFileName(const std::string& dbname); // Return the name of a temporary file owned by the db named "dbname". // The result will be prefixed with "dbname". std::string TempFileName(const std::string& dbname, uint64_t number); // Return the name of the info log file for "dbname". std::string InfoLogFileName(const std::string& dbname); // Return the name of the old info log file for "dbname". std::string OldInfoLogFileName(const std::string& dbname); // If filename is a leveldb file, store the type of the file in *type. // The number encoded in the filename is stored in *number. If the // filename was successfully parsed, returns true. Else return false. bool ParseFileName(const std::string& filename, uint64_t* number, FileType* type); // Make the CURRENT file point to the descriptor file with the // specified number. Status SetCurrentFile(Env* env, const std::string& dbname, uint64_t descriptor_number); } // namespace leveldb #endif // STORAGE_LEVELDB_DB_FILENAME_H_
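A hedged usage sketch for ParseFileName above: classify the children of a database directory and count its sstables, much as the repair and garbage-collection code do. CountTables is an invented name.

#include <cstdint>
#include <string>
#include <vector>

#include "db/filename.h"
#include "leveldb/env.h"

int CountTables(leveldb::Env* env, const std::string& dbname) {
  std::vector<std::string> children;
  if (!env->GetChildren(dbname, &children).ok()) return -1;
  int tables = 0;
  uint64_t number;
  leveldb::FileType type;
  for (const std::string& name : children) {
    if (leveldb::ParseFileName(name, &number, &type) &&
        type == leveldb::kTableFile) {
      tables++;  // counts both .ldb and legacy .sst table files
    }
  }
  return tables;
}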
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/snapshot.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_DB_SNAPSHOT_H_ #define STORAGE_LEVELDB_DB_SNAPSHOT_H_ #include "db/dbformat.h" #include "leveldb/db.h" namespace leveldb { class SnapshotList; // Snapshots are kept in a doubly-linked list in the DB. // Each SnapshotImpl corresponds to a particular sequence number. class SnapshotImpl : public Snapshot { public: SnapshotImpl(SequenceNumber sequence_number) : sequence_number_(sequence_number) {} SequenceNumber sequence_number() const { return sequence_number_; } private: friend class SnapshotList; // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList // implementation operates on the next/previous fields directly. SnapshotImpl* prev_; SnapshotImpl* next_; const SequenceNumber sequence_number_; #if !defined(NDEBUG) SnapshotList* list_ = nullptr; #endif // !defined(NDEBUG) }; class SnapshotList { public: SnapshotList() : head_(0) { head_.prev_ = &head_; head_.next_ = &head_; } bool empty() const { return head_.next_ == &head_; } SnapshotImpl* oldest() const { assert(!empty()); return head_.next_; } SnapshotImpl* newest() const { assert(!empty()); return head_.prev_; } // Creates a SnapshotImpl and appends it to the end of the list. SnapshotImpl* New(SequenceNumber sequence_number) { assert(empty() || newest()->sequence_number_ <= sequence_number); SnapshotImpl* snapshot = new SnapshotImpl(sequence_number); #if !defined(NDEBUG) snapshot->list_ = this; #endif // !defined(NDEBUG) snapshot->next_ = &head_; snapshot->prev_ = head_.prev_; snapshot->prev_->next_ = snapshot; snapshot->next_->prev_ = snapshot; return snapshot; } // Removes a SnapshotImpl from this list. // // The snapshot must have been created by calling New() on this list. // // The snapshot pointer should not be const, because its memory is // deallocated. However, that would force us to change DB::ReleaseSnapshot(), // which is in the API, and currently takes a const Snapshot. void Delete(const SnapshotImpl* snapshot) { #if !defined(NDEBUG) assert(snapshot->list_ == this); #endif // !defined(NDEBUG) snapshot->prev_->next_ = snapshot->next_; snapshot->next_->prev_ = snapshot->prev_; delete snapshot; } private: // Dummy head of doubly-linked list of snapshots SnapshotImpl head_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_DB_SNAPSHOT_H_
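A hedged exercise of the list above: snapshots enter at the tail of the circular list, so oldest() and newest() are O(1) reads of the dummy head's neighbors. SnapshotListDemo is an invented function for illustration.

#include <cassert>

#include "db/snapshot.h"

void SnapshotListDemo() {
  leveldb::SnapshotList list;
  assert(list.empty());
  leveldb::SnapshotImpl* a = list.New(10);
  leveldb::SnapshotImpl* b = list.New(20);  // sequence numbers never decrease
  assert(list.oldest() == a && list.newest() == b);
  list.Delete(a);
  assert(list.oldest() == b);
  list.Delete(b);
  assert(list.empty());
}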
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/leveldbutil.cc
// Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <stdio.h> #include "leveldb/dumpfile.h" #include "leveldb/env.h" #include "leveldb/status.h" namespace leveldb { namespace { class StdoutPrinter : public WritableFile { public: Status Append(const Slice& data) override { fwrite(data.data(), 1, data.size(), stdout); return Status::OK(); } Status Close() override { return Status::OK(); } Status Flush() override { return Status::OK(); } Status Sync() override { return Status::OK(); } std::string GetName() const override { return "[stdout]"; } }; bool HandleDumpCommand(Env* env, char** files, int num) { StdoutPrinter printer; bool ok = true; for (int i = 0; i < num; i++) { Status s = DumpFile(env, files[i], &printer); if (!s.ok()) { fprintf(stderr, "%s\n", s.ToString().c_str()); ok = false; } } return ok; } } // namespace } // namespace leveldb static void Usage() { fprintf(stderr, "Usage: leveldbutil command...\n" " dump files... -- dump contents of specified files\n"); } int main(int argc, char** argv) { leveldb::Env* env = leveldb::Env::Default(); bool ok = true; if (argc < 2) { Usage(); ok = false; } else { std::string command = argv[1]; if (command == "dump") { ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2); } else { Usage(); ok = false; } } return (ok ? 0 : 1); }
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/version_edit_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/version_edit.h" #include "util/testharness.h" namespace leveldb { static void TestEncodeDecode(const VersionEdit& edit) { std::string encoded, encoded2; edit.EncodeTo(&encoded); VersionEdit parsed; Status s = parsed.DecodeFrom(encoded); ASSERT_TRUE(s.ok()) << s.ToString(); parsed.EncodeTo(&encoded2); ASSERT_EQ(encoded, encoded2); } class VersionEditTest {}; TEST(VersionEditTest, EncodeDecode) { static const uint64_t kBig = 1ull << 50; VersionEdit edit; for (int i = 0; i < 4; i++) { TestEncodeDecode(edit); edit.AddFile(3, kBig + 300 + i, kBig + 400 + i, InternalKey("foo", kBig + 500 + i, kTypeValue), InternalKey("zoo", kBig + 600 + i, kTypeDeletion)); edit.DeleteFile(4, kBig + 700 + i); edit.SetCompactPointer(i, InternalKey("x", kBig + 900 + i, kTypeValue)); } edit.SetComparatorName("foo"); edit.SetLogNumber(kBig + 100); edit.SetNextFile(kBig + 200); edit.SetLastSequence(kBig + 1000); TestEncodeDecode(edit); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/memtable.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/memtable.h"
#include "db/dbformat.h"
#include "leveldb/comparator.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "util/coding.h"

namespace leveldb {

static Slice GetLengthPrefixedSlice(const char* data) {
  uint32_t len;
  const char* p = data;
  p = GetVarint32Ptr(p, p + 5, &len);  // +5: we assume "p" is not corrupted
  return Slice(p, len);
}

MemTable::MemTable(const InternalKeyComparator& comparator)
    : comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}

MemTable::~MemTable() { assert(refs_ == 0); }

size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }

int MemTable::KeyComparator::operator()(const char* aptr,
                                        const char* bptr) const {
  // Internal keys are encoded as length-prefixed strings.
  Slice a = GetLengthPrefixedSlice(aptr);
  Slice b = GetLengthPrefixedSlice(bptr);
  return comparator.Compare(a, b);
}

// Encode a suitable internal key target for "target" and return it.
// Uses *scratch as scratch space, and the returned pointer will point
// into this scratch space.
static const char* EncodeKey(std::string* scratch, const Slice& target) {
  scratch->clear();
  PutVarint32(scratch, target.size());
  scratch->append(target.data(), target.size());
  return scratch->data();
}

class MemTableIterator : public Iterator {
 public:
  explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}

  MemTableIterator(const MemTableIterator&) = delete;
  MemTableIterator& operator=(const MemTableIterator&) = delete;

  ~MemTableIterator() override = default;

  bool Valid() const override { return iter_.Valid(); }
  void Seek(const Slice& k) override { iter_.Seek(EncodeKey(&tmp_, k)); }
  void SeekToFirst() override { iter_.SeekToFirst(); }
  void SeekToLast() override { iter_.SeekToLast(); }
  void Next() override { iter_.Next(); }
  void Prev() override { iter_.Prev(); }
  Slice key() const override { return GetLengthPrefixedSlice(iter_.key()); }
  Slice value() const override {
    Slice key_slice = GetLengthPrefixedSlice(iter_.key());
    return GetLengthPrefixedSlice(key_slice.data() + key_slice.size());
  }

  Status status() const override { return Status::OK(); }

 private:
  MemTable::Table::Iterator iter_;
  std::string tmp_;  // For passing to EncodeKey
};

Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }

void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
                   const Slice& value) {
  // Format of an entry is concatenation of:
  //  key_size     : varint32 of internal_key.size()
  //  key bytes    : char[internal_key.size()]
  //  value_size   : varint32 of value.size()
  //  value bytes  : char[value.size()]
  size_t key_size = key.size();
  size_t val_size = value.size();
  size_t internal_key_size = key_size + 8;
  const size_t encoded_len = VarintLength(internal_key_size) +
                             internal_key_size + VarintLength(val_size) +
                             val_size;
  char* buf = arena_.Allocate(encoded_len);
  char* p = EncodeVarint32(buf, internal_key_size);
  memcpy(p, key.data(), key_size);
  p += key_size;
  EncodeFixed64(p, (s << 8) | type);
  p += 8;
  p = EncodeVarint32(p, val_size);
  memcpy(p, value.data(), val_size);
  assert(p + val_size == buf + encoded_len);
  table_.Insert(buf);
}

bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) {
  Slice memkey = key.memtable_key();
  Table::Iterator iter(&table_);
  iter.Seek(memkey.data());
  if (iter.Valid()) {
    // entry format is:
    //    klength  varint32
    //    userkey  char[klength]
    //    tag      uint64
    //    vlength  varint32
    //    value    char[vlength]
    // Check that it belongs to same user key.  We do not check the
    // sequence number since the Seek() call above should have skipped
    // all entries with overly large sequence numbers.
    const char* entry = iter.key();
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if (comparator_.comparator.user_comparator()->Compare(
            Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
      // Correct user key
      const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
      switch (static_cast<ValueType>(tag & 0xff)) {
        case kTypeValue: {
          Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
          value->assign(v.data(), v.size());
          return true;
        }
        case kTypeDeletion:
          *s = Status::NotFound(Slice());
          return true;
      }
    }
  }
  return false;
}

}  // namespace leveldb
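// --- Illustrative sketch (not part of the original file) ---
// Decoding an entry produced by MemTable::Add above, by hand. The pointer
// name "entry" is hypothetical; the helpers are the ones used in this file:
//
//   uint32_t internal_key_len;
//   const char* p = GetVarint32Ptr(entry, entry + 5, &internal_key_len);
//   Slice user_key(p, internal_key_len - 8);  // drop the 8-byte tag
//   uint64_t tag = DecodeFixed64(p + internal_key_len - 8);
//   SequenceNumber seq = tag >> 8;                        // high 56 bits
//   ValueType type = static_cast<ValueType>(tag & 0xff);  // low 8 bits
//   Slice value = GetLengthPrefixedSlice(p + internal_key_len);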
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/c_test.c
/* Copyright (c) 2011 The LevelDB Authors. All rights reserved.
   Use of this source code is governed by a BSD-style license that can be
   found in the LICENSE file. See the AUTHORS file for names of contributors. */

#include "leveldb/c.h"

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

const char* phase = "";

static void StartPhase(const char* name) {
  fprintf(stderr, "=== Test %s\n", name);
  phase = name;
}

#define CheckNoError(err)                                                 \
  if ((err) != NULL) {                                                    \
    fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \
    abort();                                                              \
  }

#define CheckCondition(cond)                                                \
  if (!(cond)) {                                                            \
    fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, #cond);   \
    abort();                                                                \
  }

static void CheckEqual(const char* expected, const char* v, size_t n) {
  if (expected == NULL && v == NULL) {
    // ok
  } else if (expected != NULL && v != NULL && n == strlen(expected) &&
             memcmp(expected, v, n) == 0) {
    // ok
    return;
  } else {
    fprintf(stderr, "%s: expected '%s', got '%s'\n", phase,
            (expected ? expected : "(null)"), (v ? v : "(null)"));
    abort();
  }
}

static void Free(char** ptr) {
  if (*ptr) {
    free(*ptr);
    *ptr = NULL;
  }
}

static void CheckGet(leveldb_t* db, const leveldb_readoptions_t* options,
                     const char* key, const char* expected) {
  char* err = NULL;
  size_t val_len;
  char* val;
  val = leveldb_get(db, options, key, strlen(key), &val_len, &err);
  CheckNoError(err);
  CheckEqual(expected, val, val_len);
  Free(&val);
}

static void CheckIter(leveldb_iterator_t* iter, const char* key,
                      const char* val) {
  size_t len;
  const char* str;
  str = leveldb_iter_key(iter, &len);
  CheckEqual(key, str, len);
  str = leveldb_iter_value(iter, &len);
  CheckEqual(val, str, len);
}

// Callback from leveldb_writebatch_iterate()
static void CheckPut(void* ptr, const char* k, size_t klen, const char* v,
                     size_t vlen) {
  int* state = (int*)ptr;
  CheckCondition(*state < 2);
  switch (*state) {
    case 0:
      CheckEqual("bar", k, klen);
      CheckEqual("b", v, vlen);
      break;
    case 1:
      CheckEqual("box", k, klen);
      CheckEqual("c", v, vlen);
      break;
  }
  (*state)++;
}

// Callback from leveldb_writebatch_iterate()
static void CheckDel(void* ptr, const char* k, size_t klen) {
  int* state = (int*)ptr;
  CheckCondition(*state == 2);
  CheckEqual("bar", k, klen);
  (*state)++;
}

static void CmpDestroy(void* arg) {}

static int CmpCompare(void* arg, const char* a, size_t alen, const char* b,
                      size_t blen) {
  int n = (alen < blen) ? alen : blen;
  int r = memcmp(a, b, n);
  if (r == 0) {
    if (alen < blen)
      r = -1;
    else if (alen > blen)
      r = +1;
  }
  return r;
}

static const char* CmpName(void* arg) { return "foo"; }

// Custom filter policy
static uint8_t fake_filter_result = 1;

static void FilterDestroy(void* arg) {}

static const char* FilterName(void* arg) { return "TestFilter"; }

static char* FilterCreate(void* arg, const char* const* key_array,
                          const size_t* key_length_array, int num_keys,
                          size_t* filter_length) {
  *filter_length = 4;
  char* result = malloc(4);
  memcpy(result, "fake", 4);
  return result;
}

uint8_t FilterKeyMatch(void* arg, const char* key, size_t length,
                       const char* filter, size_t filter_length) {
  CheckCondition(filter_length == 4);
  CheckCondition(memcmp(filter, "fake", 4) == 0);
  return fake_filter_result;
}

int main(int argc, char** argv) {
  leveldb_t* db;
  leveldb_comparator_t* cmp;
  leveldb_cache_t* cache;
  leveldb_env_t* env;
  leveldb_options_t* options;
  leveldb_readoptions_t* roptions;
  leveldb_writeoptions_t* woptions;
  char* dbname;
  char* err = NULL;
  int run = -1;

  CheckCondition(leveldb_major_version() >= 1);
  CheckCondition(leveldb_minor_version() >= 1);

  StartPhase("create_objects");
  cmp = leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName);
  env = leveldb_create_default_env();
  cache = leveldb_cache_create_lru(100000);
  dbname = leveldb_env_get_test_directory(env);
  CheckCondition(dbname != NULL);

  options = leveldb_options_create();
  leveldb_options_set_comparator(options, cmp);
  leveldb_options_set_error_if_exists(options, 1);
  leveldb_options_set_cache(options, cache);
  leveldb_options_set_env(options, env);
  leveldb_options_set_info_log(options, NULL);
  leveldb_options_set_write_buffer_size(options, 100000);
  leveldb_options_set_paranoid_checks(options, 1);
  leveldb_options_set_max_open_files(options, 10);
  leveldb_options_set_block_size(options, 1024);
  leveldb_options_set_block_restart_interval(options, 8);
  leveldb_options_set_max_file_size(options, 3 << 20);
  leveldb_options_set_compression(options, leveldb_no_compression);

  roptions = leveldb_readoptions_create();
  leveldb_readoptions_set_verify_checksums(roptions, 1);
  leveldb_readoptions_set_fill_cache(roptions, 0);

  woptions = leveldb_writeoptions_create();
  leveldb_writeoptions_set_sync(woptions, 1);

  StartPhase("destroy");
  leveldb_destroy_db(options, dbname, &err);
  Free(&err);

  StartPhase("open_error");
  db = leveldb_open(options, dbname, &err);
  CheckCondition(err != NULL);
  Free(&err);

  StartPhase("leveldb_free");
  db = leveldb_open(options, dbname, &err);
  CheckCondition(err != NULL);
  leveldb_free(err);
  err = NULL;

  StartPhase("open");
  leveldb_options_set_create_if_missing(options, 1);
  db = leveldb_open(options, dbname, &err);
  CheckNoError(err);
  CheckGet(db, roptions, "foo", NULL);

  StartPhase("put");
  leveldb_put(db, woptions, "foo", 3, "hello", 5, &err);
  CheckNoError(err);
  CheckGet(db, roptions, "foo", "hello");

  StartPhase("compactall");
  leveldb_compact_range(db, NULL, 0, NULL, 0);
  CheckGet(db, roptions, "foo", "hello");

  StartPhase("compactrange");
  leveldb_compact_range(db, "a", 1, "z", 1);
  CheckGet(db, roptions, "foo", "hello");

  StartPhase("writebatch");
  {
    leveldb_writebatch_t* wb = leveldb_writebatch_create();
    leveldb_writebatch_put(wb, "foo", 3, "a", 1);
    leveldb_writebatch_clear(wb);
    leveldb_writebatch_put(wb, "bar", 3, "b", 1);
    leveldb_writebatch_put(wb, "box", 3, "c", 1);

    leveldb_writebatch_t* wb2 = leveldb_writebatch_create();
    leveldb_writebatch_delete(wb2, "bar", 3);
    leveldb_writebatch_append(wb, wb2);
    leveldb_writebatch_destroy(wb2);

    leveldb_write(db, woptions, wb, &err);
    CheckNoError(err);
    CheckGet(db, roptions, "foo", "hello");
    CheckGet(db, roptions, "bar", NULL);
    CheckGet(db, roptions, "box", "c");

    int pos = 0;
    leveldb_writebatch_iterate(wb, &pos, CheckPut, CheckDel);
    CheckCondition(pos == 3);
    leveldb_writebatch_destroy(wb);
  }

  StartPhase("iter");
  {
    leveldb_iterator_t* iter = leveldb_create_iterator(db, roptions);
    CheckCondition(!leveldb_iter_valid(iter));
    leveldb_iter_seek_to_first(iter);
    CheckCondition(leveldb_iter_valid(iter));
    CheckIter(iter, "box", "c");
    leveldb_iter_next(iter);
    CheckIter(iter, "foo", "hello");
    leveldb_iter_prev(iter);
    CheckIter(iter, "box", "c");
    leveldb_iter_prev(iter);
    CheckCondition(!leveldb_iter_valid(iter));
    leveldb_iter_seek_to_last(iter);
    CheckIter(iter, "foo", "hello");
    leveldb_iter_seek(iter, "b", 1);
    CheckIter(iter, "box", "c");
    leveldb_iter_get_error(iter, &err);
    CheckNoError(err);
    leveldb_iter_destroy(iter);
  }

  StartPhase("approximate_sizes");
  {
    int i;
    int n = 20000;
    char keybuf[100];
    char valbuf[100];
    uint64_t sizes[2];
    const char* start[2] = {"a", "k00000000000000010000"};
    size_t start_len[2] = {1, 21};
    const char* limit[2] = {"k00000000000000010000", "z"};
    size_t limit_len[2] = {21, 1};
    leveldb_writeoptions_set_sync(woptions, 0);
    for (i = 0; i < n; i++) {
      snprintf(keybuf, sizeof(keybuf), "k%020d", i);
      snprintf(valbuf, sizeof(valbuf), "v%020d", i);
      leveldb_put(db, woptions, keybuf, strlen(keybuf), valbuf, strlen(valbuf),
                  &err);
      CheckNoError(err);
    }
    leveldb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes);
    CheckCondition(sizes[0] > 0);
    CheckCondition(sizes[1] > 0);
  }

  StartPhase("property");
  {
    char* prop = leveldb_property_value(db, "nosuchprop");
    CheckCondition(prop == NULL);
    prop = leveldb_property_value(db, "leveldb.stats");
    CheckCondition(prop != NULL);
    Free(&prop);
  }

  StartPhase("snapshot");
  {
    const leveldb_snapshot_t* snap;
    snap = leveldb_create_snapshot(db);
    leveldb_delete(db, woptions, "foo", 3, &err);
    CheckNoError(err);
    leveldb_readoptions_set_snapshot(roptions, snap);
    CheckGet(db, roptions, "foo", "hello");
    leveldb_readoptions_set_snapshot(roptions, NULL);
    CheckGet(db, roptions, "foo", NULL);
    leveldb_release_snapshot(db, snap);
  }

  StartPhase("repair");
  {
    leveldb_close(db);
    leveldb_options_set_create_if_missing(options, 0);
    leveldb_options_set_error_if_exists(options, 0);
    leveldb_repair_db(options, dbname, &err);
    CheckNoError(err);
    db = leveldb_open(options, dbname, &err);
    CheckNoError(err);
    CheckGet(db, roptions, "foo", NULL);
    CheckGet(db, roptions, "bar", NULL);
    CheckGet(db, roptions, "box", "c");
    leveldb_options_set_create_if_missing(options, 1);
    leveldb_options_set_error_if_exists(options, 1);
  }

  StartPhase("filter");
  for (run = 0; run < 2; run++) {
    // First run uses custom filter, second run uses bloom filter
    CheckNoError(err);
    leveldb_filterpolicy_t* policy;
    if (run == 0) {
      policy = leveldb_filterpolicy_create(NULL, FilterDestroy, FilterCreate,
                                           FilterKeyMatch, FilterName);
    } else {
      policy = leveldb_filterpolicy_create_bloom(10);
    }

    // Create new database
    leveldb_close(db);
    leveldb_destroy_db(options, dbname, &err);
    leveldb_options_set_filter_policy(options, policy);
    db = leveldb_open(options, dbname, &err);
    CheckNoError(err);
    leveldb_put(db, woptions, "foo", 3, "foovalue", 8, &err);
    CheckNoError(err);
    leveldb_put(db, woptions, "bar", 3, "barvalue", 8, &err);
    CheckNoError(err);
    leveldb_compact_range(db, NULL, 0, NULL, 0);

    fake_filter_result = 1;
    CheckGet(db, roptions, "foo", "foovalue");
    CheckGet(db, roptions, "bar", "barvalue");
    if (run == 0) {
      // Must not find value when custom filter returns false
      fake_filter_result = 0;
      CheckGet(db, roptions, "foo", NULL);
      CheckGet(db, roptions, "bar", NULL);
      fake_filter_result = 1;
      CheckGet(db, roptions, "foo", "foovalue");
      CheckGet(db, roptions, "bar", "barvalue");
    }
    leveldb_options_set_filter_policy(options, NULL);
    leveldb_filterpolicy_destroy(policy);
  }

  StartPhase("cleanup");
  leveldb_close(db);
  leveldb_options_destroy(options);
  leveldb_readoptions_destroy(roptions);
  leveldb_writeoptions_destroy(woptions);
  leveldb_free(dbname);
  leveldb_cache_destroy(cache);
  leveldb_comparator_destroy(cmp);
  leveldb_env_destroy(env);
  fprintf(stderr, "PASS\n");
  return 0;
}
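/* --- Illustrative sketch (not part of the original test) ---
 * The smallest possible use of the C API exercised above, with a
 * hypothetical database path and error handling trimmed to the essentials:
 *
 *   leveldb_options_t* o = leveldb_options_create();
 *   leveldb_options_set_create_if_missing(o, 1);
 *   char* e = NULL;
 *   leveldb_t* d = leveldb_open(o, "/tmp/sketchdb", &e);
 *   if (e == NULL) {
 *     leveldb_writeoptions_t* w = leveldb_writeoptions_create();
 *     leveldb_put(d, w, "k", 1, "v", 1, &e);
 *     leveldb_writeoptions_destroy(w);
 *     leveldb_close(d);
 *   }
 *   leveldb_free(e);
 *   leveldb_options_destroy(o);
 */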
0
bitcoin/src/leveldb
bitcoin/src/leveldb/db/dbformat_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/dbformat.h"

#include "util/logging.h"
#include "util/testharness.h"

namespace leveldb {

static std::string IKey(const std::string& user_key, uint64_t seq,
                        ValueType vt) {
  std::string encoded;
  AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
  return encoded;
}

static std::string Shorten(const std::string& s, const std::string& l) {
  std::string result = s;
  InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l);
  return result;
}

static std::string ShortSuccessor(const std::string& s) {
  std::string result = s;
  InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result);
  return result;
}

static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
  std::string encoded = IKey(key, seq, vt);

  Slice in(encoded);
  ParsedInternalKey decoded("", 0, kTypeValue);

  ASSERT_TRUE(ParseInternalKey(in, &decoded));
  ASSERT_EQ(key, decoded.user_key.ToString());
  ASSERT_EQ(seq, decoded.sequence);
  ASSERT_EQ(vt, decoded.type);

  ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}

class FormatTest {};

TEST(FormatTest, InternalKey_EncodeDecode) {
  const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
  const uint64_t seq[] = {1,
                          2,
                          3,
                          (1ull << 8) - 1,
                          1ull << 8,
                          (1ull << 8) + 1,
                          (1ull << 16) - 1,
                          1ull << 16,
                          (1ull << 16) + 1,
                          (1ull << 32) - 1,
                          1ull << 32,
                          (1ull << 32) + 1};
  for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
    for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
      TestKey(keys[k], seq[s], kTypeValue);
      TestKey("hello", 1, kTypeDeletion);
    }
  }
}

TEST(FormatTest, InternalKey_DecodeFromEmpty) {
  InternalKey internal_key;

  ASSERT_TRUE(!internal_key.DecodeFrom(""));
}

TEST(FormatTest, InternalKeyShortSeparator) {
  // When user keys are same
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
  ASSERT_EQ(
      IKey("foo", 100, kTypeValue),
      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
  ASSERT_EQ(
      IKey("foo", 100, kTypeValue),
      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
  ASSERT_EQ(
      IKey("foo", 100, kTypeValue),
      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));

  // When user keys are misordered
  ASSERT_EQ(IKey("foo", 100, kTypeValue),
            Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));

  // When user keys are different, but correctly ordered
  ASSERT_EQ(
      IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
      Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));

  // When start user key is prefix of limit user key
  ASSERT_EQ(
      IKey("foo", 100, kTypeValue),
      Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));

  // When limit user key is prefix of start user key
  ASSERT_EQ(
      IKey("foobar", 100, kTypeValue),
      Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
}

TEST(FormatTest, InternalKeyShortestSuccessor) {
  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
            ShortSuccessor(IKey("foo", 100, kTypeValue)));
  ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue),
            ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
}

TEST(FormatTest, ParsedInternalKeyDebugString) {
  ParsedInternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);

  ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
}

TEST(FormatTest, InternalKeyDebugString) {
  InternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
  ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());

  InternalKey invalid_key;
  ASSERT_EQ("(bad)", invalid_key.DebugString());
}

}  // namespace leveldb

int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
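// --- Illustrative note (not part of the original test) ---
// Why Shorten(IKey("foo", ...), IKey("hello", ...)) yields "g" above:
// BytewiseComparator picks the shortest user key strictly between the two
// inputs, here the one-byte successor of 'f'. The separator is then tagged
// with kMaxSequenceNumber/kValueTypeForSeek so that it sorts before every
// real entry for that user key (sequence numbers sort in decreasing order).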
0
bitcoin/src/leveldb
bitcoin/src/leveldb/doc/impl.md
## Files

The implementation of leveldb is similar in spirit to the representation of a
single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html).
However the organization of the files that make up the representation is
somewhat different and is explained below.

Each database is represented by a set of files stored in a directory. There are
several different types of files as documented below:

### Log files

A log file (*.log) stores a sequence of recent updates. Each update is appended
to the current log file. When the log file reaches a pre-determined size
(approximately 4MB by default), it is converted to a sorted table (see below)
and a new log file is created for future updates.

A copy of the current log file is kept in an in-memory structure (the
`memtable`). This copy is consulted on every read so that read operations
reflect all logged updates.

### Sorted tables

A sorted table (*.ldb) stores a sequence of entries sorted by key. Each entry
is either a value for the key, or a deletion marker for the key. (Deletion
markers are kept around to hide obsolete values present in older sorted
tables).

The set of sorted tables are organized into a sequence of levels. The sorted
table generated from a log file is placed in a special **young** level (also
called level-0). When the number of young files exceeds a certain threshold
(currently four), all of the young files are merged together with all of the
overlapping level-1 files to produce a sequence of new level-1 files (we create
a new level-1 file for every 2MB of data.)

Files in the young level may contain overlapping keys. However files in other
levels have distinct non-overlapping key ranges. Consider level number L where
L >= 1. When the combined size of files in level-L exceeds (10^L) MB (i.e.,
10MB for level-1, 100MB for level-2, ...), one file in level-L, and all of the
overlapping files in level-(L+1) are merged to form a set of new files for
level-(L+1). These merges have the effect of gradually migrating new updates
from the young level to the largest level using only bulk reads and writes
(i.e., minimizing expensive seeks).

### Manifest

A MANIFEST file lists the set of sorted tables that make up each level, the
corresponding key ranges, and other important metadata. A new MANIFEST file
(with a new number embedded in the file name) is created whenever the database
is reopened. The MANIFEST file is formatted as a log, and changes made to the
serving state (as files are added or removed) are appended to this log.

### Current

CURRENT is a simple text file that contains the name of the latest MANIFEST
file.

### Info logs

Informational messages are printed to files named LOG and LOG.old.

### Others

Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp).

## Level 0

When the log file grows above a certain size (4MB by default):
Create a brand new memtable and log file and direct future updates here.

In the background:

1. Write the contents of the previous memtable to an sstable.
2. Discard the memtable.
3. Delete the old log file and the old memtable.
4. Add the new sstable to the young (level-0) level.

## Compactions

When the size of level L exceeds its limit, we compact it in a background
thread. The compaction picks a file from level L and all overlapping files from
the next level L+1. Note that if a level-L file overlaps only part of a
level-(L+1) file, the entire file at level-(L+1) is used as an input to the
compaction and will be discarded after the compaction.
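The size limit that triggers these compactions follows the 10^L MB rule
described above. A minimal sketch of that rule in code; the function name and
constants here are illustrative, not the library's verbatim implementation:

```c++
// Maximum total bytes allowed at a given level (L >= 1), per the text:
// 10MB at level-1, growing by a factor of ten per level.
static double MaxBytesForLevel(int level) {
  double result = 10.0 * 1048576.0;  // level-1 is allowed ~10MB
  while (level > 1) {
    result *= 10;  // each deeper level is allowed ten times more
    level--;
  }
  return result;  // e.g., level-2 -> ~100MB, level-3 -> ~1GB
}
```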
Aside: because level-0 is special (files in it may overlap each other), we
treat compactions from level-0 to level-1 specially: a level-0 compaction may
pick more than one level-0 file in case some of these files overlap each other.

A compaction merges the contents of the picked files to produce a sequence of
level-(L+1) files. We switch to producing a new level-(L+1) file after the
current output file has reached the target file size (2MB). We also switch to
a new output file when the key range of the current output file has grown
enough to overlap more than ten level-(L+2) files. This last rule ensures that
a later compaction of a level-(L+1) file will not pick up too much data from
level-(L+2).

The old files are discarded and the new files are added to the serving state.

Compactions for a particular level rotate through the key space. In more
detail, for each level L, we remember the ending key of the last compaction at
level L. The next compaction for level L will pick the first file that starts
after this key (wrapping around to the beginning of the key space if there is
no such file).

Compactions drop overwritten values. They also drop deletion markers if there
are no higher numbered levels that contain a file whose range overlaps the
current key.

### Timing

Level-0 compactions will read up to four 1MB files from level-0, and at worst
all the level-1 files (10MB). I.e., we will read 14MB and write 14MB.

Other than the special level-0 compactions, we will pick one 2MB file from
level L. In the worst case, this will overlap ~ 12 files from level L+1 (10
because level-(L+1) is ten times the size of level-L, and another two at the
boundaries since the file ranges at level-L will usually not be aligned with
the file ranges at level-L+1). The compaction will therefore read 26MB and
write 26MB. Assuming a disk IO rate of 100MB/s (ballpark range for modern
drives), the worst compaction cost will be approximately 0.5 second.

If we throttle the background writing to something small, say 10% of the full
100MB/s speed, a compaction may take up to 5 seconds. If the user is writing
at 10MB/s, we might build up lots of level-0 files (~50 to hold the 5*10MB).
This may significantly increase the cost of reads due to the overhead of
merging more files together on every read.

Solution 1: To reduce this problem, we might want to increase the log
switching threshold when the number of level-0 files is large. Though the
downside is that the larger this threshold, the more memory we will need to
hold the corresponding memtable.

Solution 2: We might want to decrease write rate artificially when the number
of level-0 files goes up.

Solution 3: We work on reducing the cost of very wide merges. Perhaps most of
the level-0 files will have their blocks sitting uncompressed in the cache and
we will only need to worry about the O(N) complexity in the merging iterator.
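The worst-case figures in the Timing section follow directly from the
constants above. A small self-contained check of the arithmetic; this is a
back-of-the-envelope sketch, with every constant taken from the text:

```c++
#include <cstdio>

int main() {
  const double file_mb = 2.0;         // one level-L input file
  const double overlap_files = 12.0;  // ~10x growth + 2 boundary files
  const double io_mb = file_mb + overlap_files * file_mb;  // 26MB read
  const double disk_mb_per_sec = 100.0;
  // Read 26MB and write 26MB at 100MB/s => roughly half a second.
  std::printf("worst-case compaction: %.0fMB in, %.0fMB out, ~%.2fs\n",
              io_mb, io_mb, 2 * io_mb / disk_mb_per_sec);
  return 0;
}
```

### Number of files

Instead of always making 2MB files, we could make larger files for larger
levels to reduce the total file count, though at the expense of more bursty
compactions. Alternatively, we could shard the set of files into multiple
directories.

An experiment on an ext3 filesystem on Feb 04, 2011 shows the following
timings to do 100K file opens in directories with varying number of files:

| Files in directory | Microseconds to open a file |
|-------------------:|----------------------------:|
|               1000 |                           9 |
|              10000 |                          10 |
|             100000 |                          16 |

So maybe even the sharding is not necessary on modern filesystems?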
## Recovery

* Read CURRENT to find name of the latest committed MANIFEST
* Read the named MANIFEST file
* Clean up stale files
* We could open all sstables here, but it is probably better to be lazy...
* Convert log chunk to a new level-0 sstable
* Start directing new writes to a new log file with recovered sequence#

## Garbage collection of files

`DeleteObsoleteFiles()` is called at the end of every compaction and at the
end of recovery. It finds the names of all files in the database. It deletes
all log files that are not the current log file. It deletes all table files
that are not referenced from some level and are not the output of an active
compaction.
0
bitcoin/src/leveldb
bitcoin/src/leveldb/doc/benchmark.html
<!DOCTYPE html>
<html>
<head>
<title>LevelDB Benchmarks</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<style>
body { font-family:Helvetica,sans-serif; padding:20px; }
h2 { padding-top:30px; }
table.bn { width:800px; border-collapse:collapse; border:0; padding:0; }
table.bnbase { width:650px; }
table.bn td { padding:2px 0; }
table.bn td.c1 { font-weight:bold; width:150px; }
table.bn td.c1 div.e { float:right; font-weight:normal; }
table.bn td.c2 { width:150px; text-align:right; padding:2px; }
table.bn td.c3 { width:350px; }
table.bn td.c4 { width:150px; font-size:small; padding-left:4px; }
/* chart bars */
div.bldb { background-color:#0255df; }
div.bkct { background-color:#df5555; }
div.bsql { background-color:#aadf55; }
.code { font-family:monospace; font-size:large; }
.todo { color: red; }
</style>
</head>
<body>
<h1>LevelDB Benchmarks</h1>
<p>Google, July 2011</p>
<hr>

<p>In order to test LevelDB's performance, we benchmark it against other
well-established database implementations. We compare LevelDB (revision 39)
against <a href="http://www.sqlite.org/">SQLite3</a> (version 3.7.6.3) and
<a href="http://fallabs.com/kyotocabinet/spex.html">Kyoto Cabinet's</a>
(version 1.2.67) TreeDB (a B+Tree based key-value store). We would like to
acknowledge Scott Hess and Mikio Hirabayashi for their suggestions and
contributions to the SQLite3 and Kyoto Cabinet benchmarks, respectively.</p>

<p>Benchmarks were all performed on a six-core Intel(R) Xeon(R) CPU X5650 @
2.67GHz, with 12288 KB of total L3 cache and 12 GB of DDR3 RAM at 1333 MHz.
(Note that LevelDB uses at most two CPUs since the benchmarks are single
threaded: one to run the benchmark, and one for background compactions.) We
ran the benchmarks on two machines (with identical processors), one with an
Ext3 file system and one with an Ext4 file system. The machine with the Ext3
file system has a SATA Hitachi HDS721050CLA362 hard drive. The machine with
the Ext4 file system has a SATA Samsung HD502HJ hard drive. Both hard drives
spin at 7200 RPM and have hard drive write-caching enabled (using
`hdparm -W 1 [device]`). The numbers reported below are the median of three
measurements.</p>

<h4>Benchmark Source Code</h4>
<p>We wrote benchmark tools for SQLite and Kyoto TreeDB based on LevelDB's
<span class="code">db_bench</span>. The code for each of the benchmarks
resides here:</p>
<ul>
<li> <b>LevelDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench.cc">benchmarks/db_bench.cc</a>.</li>
<li> <b>SQLite:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_sqlite3.cc">benchmarks/db_bench_sqlite3.cc</a>.</li>
<li> <b>Kyoto TreeDB:</b> <a href="https://github.com/google/leveldb/blob/master/benchmarks/db_bench_tree_db.cc">benchmarks/db_bench_tree_db.cc</a>.</li>
</ul>

<h4>Custom Build Specifications</h4>
<ul>
<li>LevelDB: LevelDB was compiled with the
<a href="http://code.google.com/p/google-perftools">tcmalloc</a> library and
the <a href="http://code.google.com/p/snappy/">Snappy</a> compression library
(revision 33). Assertions were disabled.</li>
<li>TreeDB: TreeDB was compiled using the
<a href="http://www.oberhumer.com/opensource/lzo/">LZO</a> compression library
(version 2.03). Furthermore, we enabled the TSMALL and TLINEAR options when
opening the database in order to reduce the footprint of each record.</li>
<li>SQLite: We tuned SQLite's performance, by setting its locking mode to
exclusive. We also enabled SQLite's
<a href="http://www.sqlite.org/draft/wal.html">write-ahead logging</a>.</li>
</ul>

<h2>1. Baseline Performance</h2>
<p>This section gives the baseline performance of all the databases. Following
sections show how performance changes as various parameters are varied. For
the baseline:</p>
<ul>
<li> Each database is allowed 4 MB of cache memory.</li>
<li> Databases are opened in <em>asynchronous</em> write mode. (LevelDB's sync
option, TreeDB's OAUTOSYNC option, and SQLite3's synchronous options are all
turned off). I.e., every write is pushed to the operating system, but the
benchmark does not wait for the write to reach the disk.</li>
<li> Keys are 16 bytes each.</li>
<li> Values are 100 bytes each (with enough redundancy so that a simple
compressor shrinks them to 50% of their original size).</li>
<li> Sequential reads/writes traverse the key space in increasing order.</li>
<li> Random reads/writes traverse the key space in random order.</li>
</ul>

<h3>A. Sequential Reads</h3>
<table class="bn bnbase">
<tr><td class="c1">LevelDB</td><td class="c2">4,030,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">1,010,000 ops/sec</td><td class="c3"><div class="bkct" style="width:95px">&nbsp;</div></td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">383,000 ops/sec</td><td class="c3"><div class="bsql" style="width:33px">&nbsp;</div></td></tr>
</table>

<h3>B. Random Reads</h3>
<table class="bn bnbase">
<tr><td class="c1">LevelDB</td><td class="c2">129,000 ops/sec</td><td class="c3"><div class="bldb" style="width:298px">&nbsp;</div></td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">151,000 ops/sec</td><td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">134,000 ops/sec</td><td class="c3"><div class="bsql" style="width:310px">&nbsp;</div></td></tr>
</table>

<h3>C. Sequential Writes</h3>
<table class="bn bnbase">
<tr><td class="c1">LevelDB</td><td class="c2">779,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">342,000 ops/sec</td><td class="c3"><div class="bkct" style="width:154px">&nbsp;</div></td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">48,600 ops/sec</td><td class="c3"><div class="bsql" style="width:22px">&nbsp;</div></td></tr>
</table>

<h3>D. Random Writes</h3>
<table class="bn bnbase">
<tr><td class="c1">LevelDB</td><td class="c2">164,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">88,500 ops/sec</td><td class="c3"><div class="bkct" style="width:188px">&nbsp;</div></td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">9,860 ops/sec</td><td class="c3"><div class="bsql" style="width:21px">&nbsp;</div></td></tr>
</table>

<p>LevelDB outperforms both SQLite3 and TreeDB in sequential and random write
operations and sequential read operations. Kyoto Cabinet has the fastest
random read operations.</p>

<h2>2. Write Performance under Different Configurations</h2>

<h3>A. Large Values</h3>
<p>For this benchmark, we start with an empty database, and write 100,000 byte
values (~50% compressible). To keep the benchmark running time reasonable, we
stop after writing 1000 values.</p>

<h4>Sequential Writes</h4>
<table class="bn bnbase">
<tr><td class="c1">LevelDB</td><td class="c2">1,100 ops/sec</td><td class="c3"><div class="bldb" style="width:234px">&nbsp;</div></td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">1,000 ops/sec</td><td class="c3"><div class="bkct" style="width:224px">&nbsp;</div></td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">1,600 ops/sec</td><td class="c3"><div class="bsql" style="width:350px">&nbsp;</div></td></tr>
</table>

<h4>Random Writes</h4>
<table class="bn bnbase">
<tr><td class="c1">LevelDB</td><td class="c2">480 ops/sec</td><td class="c3"><div class="bldb" style="width:105px">&nbsp;</div></td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">1,100 ops/sec</td><td class="c3"><div class="bkct" style="width:240px">&nbsp;</div></td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">1,600 ops/sec</td><td class="c3"><div class="bsql" style="width:350px">&nbsp;</div></td></tr>
</table>

<p>LevelDB doesn't perform as well with large values of 100,000 bytes each.
This is because LevelDB writes keys and values at least twice: first time to
the transaction log, and second time (during a compaction) to a sorted file.
With larger values, LevelDB's per-operation efficiency is swamped by the cost
of extra copies of large values.</p>

<h3>B. Batch Writes</h3>
<p>A batch write is a set of writes that are applied atomically to the
underlying database. A single batch of N writes may be significantly faster
than N individual writes. The following benchmark writes one thousand batches
where each batch contains one thousand 100-byte values. TreeDB does not
support batch writes and is omitted from this benchmark.</p>

<h4>Sequential Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">840,000 entries/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(1.08x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">124,000 entries/sec</td><td class="c3"><div class="bsql" style="width:52px">&nbsp;</div></td><td class="c4">(2.55x baseline)</td></tr>
</table>

<h4>Random Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">221,000 entries/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(1.35x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">22,000 entries/sec</td><td class="c3"><div class="bsql" style="width:34px">&nbsp;</div></td><td class="c4">(2.23x baseline)</td></tr>
</table>

<p>Because of the way LevelDB persistent storage is organized, batches of
random writes are not much slower (only a factor of 4x) than batches of
sequential writes.</p>

<h3>C. Synchronous Writes</h3>
<p>In the following benchmark, we enable the synchronous writing modes of all
of the databases. Since this change significantly slows down the benchmark, we
stop after 10,000 writes. For synchronous write tests, we've disabled hard
drive write-caching (using `hdparm -W 0 [device]`).</p>
<ul>
<li>For LevelDB, we set WriteOptions.sync = true.</li>
<li>In TreeDB, we enabled TreeDB's OAUTOSYNC option.</li>
<li>For SQLite3, we set "PRAGMA synchronous = FULL".</li>
</ul>

<h4>Sequential Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">100 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(0.003x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">7 ops/sec</td><td class="c3"><div class="bkct" style="width:27px">&nbsp;</div></td><td class="c4">(0.0004x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">88 ops/sec</td><td class="c3"><div class="bsql" style="width:315px">&nbsp;</div></td><td class="c4">(0.002x baseline)</td></tr>
</table>

<h4>Random Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">100 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(0.015x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">8 ops/sec</td><td class="c3"><div class="bkct" style="width:29px">&nbsp;</div></td><td class="c4">(0.001x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">88 ops/sec</td><td class="c3"><div class="bsql" style="width:314px">&nbsp;</div></td><td class="c4">(0.009x baseline)</td></tr>
</table>

<p>Also see the <code>ext4</code> performance numbers below since synchronous
writes behave significantly differently on <code>ext3</code> and
<code>ext4</code>.</p>

<h3>D. Turning Compression Off</h3>
<p>In the baseline measurements, LevelDB and TreeDB were using light-weight
compression (<a href="http://code.google.com/p/snappy/">Snappy</a> for
LevelDB, and <a href="http://www.oberhumer.com/opensource/lzo/">LZO</a> for
TreeDB). SQLite3, by default does not use compression. The experiments below
show what happens when compression is disabled in all of the databases (the
SQLite3 numbers are just a copy of its baseline measurements):</p>

<h4>Sequential Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">594,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(0.76x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">485,000 ops/sec</td><td class="c3"><div class="bkct" style="width:239px">&nbsp;</div></td><td class="c4">(1.42x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">48,600 ops/sec</td><td class="c3"><div class="bsql" style="width:29px">&nbsp;</div></td><td class="c4">(1.00x baseline)</td></tr>
</table>

<h4>Random Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">135,000 ops/sec</td><td class="c3"><div class="bldb" style="width:296px">&nbsp;</div></td><td class="c4">(0.82x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">159,000 ops/sec</td><td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td><td class="c4">(1.80x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">9,860 ops/sec</td><td class="c3"><div class="bsql" style="width:22px">&nbsp;</div></td><td class="c4">(1.00x baseline)</td></tr>
</table>

<p>LevelDB's write performance is better with compression than without since
compression decreases the amount of data that has to be written to disk.
Therefore LevelDB users can leave compression enabled in most scenarios
without having to worry about a tradeoff between space usage and performance.
TreeDB's performance on the other hand is better without compression than with
compression. Presumably this is because TreeDB's compression library (LZO) is
more expensive than LevelDB's compression library (Snappy).</p>

<h3>E. Using More Memory</h3>
<p>We increased the overall cache size for each database to 128 MB. For
LevelDB, we partitioned 128 MB into a 120 MB write buffer and 8 MB of cache
(up from 2 MB of write buffer and 2 MB of cache). For SQLite3, we kept the
page size at 1024 bytes, but increased the number of pages to 131,072 (up from
4096). For TreeDB, we also kept the page size at 1024 bytes, but increased the
cache size to 128 MB (up from 4 MB).</p>

<h4>Sequential Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">812,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(1.04x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">321,000 ops/sec</td><td class="c3"><div class="bkct" style="width:138px">&nbsp;</div></td><td class="c4">(0.94x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">48,500 ops/sec</td><td class="c3"><div class="bsql" style="width:21px">&nbsp;</div></td><td class="c4">(1.00x baseline)</td></tr>
</table>

<h4>Random Writes</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">355,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(2.16x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">284,000 ops/sec</td><td class="c3"><div class="bkct" style="width:280px">&nbsp;</div></td><td class="c4">(3.21x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">9,670 ops/sec</td><td class="c3"><div class="bsql" style="width:10px">&nbsp;</div></td><td class="c4">(0.98x baseline)</td></tr>
</table>

<p>SQLite's performance does not change substantially when compared to the
baseline, but the random write performance for both LevelDB and TreeDB
increases significantly. LevelDB's performance improves because a larger write
buffer reduces the need to merge sorted files (since it creates a smaller
number of larger sorted files). TreeDB's performance goes up because the
entire database is available in memory for fast in-place updates.</p>

<h2>3. Read Performance under Different Configurations</h2>

<h3>A. Larger Caches</h3>
<p>We increased the overall memory usage to 128 MB for each database. For
LevelDB, we allocated 8 MB to LevelDB's write buffer and 120 MB to LevelDB's
cache. The other databases don't differentiate between a write buffer and a
cache, so we simply set their cache size to 128 MB.</p>

<h4>Sequential Reads</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">5,210,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(1.29x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">1,070,000 ops/sec</td><td class="c3"><div class="bkct" style="width:72px">&nbsp;</div></td><td class="c4">(1.06x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">609,000 ops/sec</td><td class="c3"><div class="bsql" style="width:41px">&nbsp;</div></td><td class="c4">(1.59x baseline)</td></tr>
</table>

<h4>Random Reads</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">190,000 ops/sec</td><td class="c3"><div class="bldb" style="width:144px">&nbsp;</div></td><td class="c4">(1.47x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">463,000 ops/sec</td><td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td><td class="c4">(3.07x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">186,000 ops/sec</td><td class="c3"><div class="bsql" style="width:141px">&nbsp;</div></td><td class="c4">(1.39x baseline)</td></tr>
</table>

<p>As expected, the read performance of all of the databases increases when
the caches are enlarged. In particular, TreeDB seems to make very effective
use of a cache that is large enough to hold the entire database.</p>

<h3>B. No Compression Reads</h3>
<p>For this benchmark, we populated a database with 1 million entries
consisting of 16 byte keys and 100 byte values. We compiled LevelDB and Kyoto
Cabinet without compression support, so results that are read out from the
database are already uncompressed. We've listed the SQLite3 baseline read
performance as a point of comparison.</p>

<h4>Sequential Reads</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">4,880,000 ops/sec</td><td class="c3"><div class="bldb" style="width:350px">&nbsp;</div></td><td class="c4">(1.21x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">1,230,000 ops/sec</td><td class="c3"><div class="bkct" style="width:88px">&nbsp;</div></td><td class="c4">(3.60x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">383,000 ops/sec</td><td class="c3"><div class="bsql" style="width:27px">&nbsp;</div></td><td class="c4">(1.00x baseline)</td></tr>
</table>

<h4>Random Reads</h4>
<table class="bn">
<tr><td class="c1">LevelDB</td><td class="c2">149,000 ops/sec</td><td class="c3"><div class="bldb" style="width:300px">&nbsp;</div></td><td class="c4">(1.16x baseline)</td></tr>
<tr><td class="c1">Kyoto TreeDB</td><td class="c2">175,000 ops/sec</td><td class="c3"><div class="bkct" style="width:350px">&nbsp;</div></td><td class="c4">(1.16x baseline)</td></tr>
<tr><td class="c1">SQLite3</td><td class="c2">134,000 ops/sec</td><td class="c3"><div class="bsql" style="width:268px">&nbsp;</div></td><td class="c4">(1.00x baseline)</td></tr>
</table>

<p>Performance of both LevelDB and TreeDB improves a small amount when
compression is disabled. Note however that under different workloads,
performance may very well be better with compression if it allows more of the
working set to fit in memory.</p>

<h2>Note about Ext4 Filesystems</h2>
<p>The preceding numbers are for an ext3 file system. Synchronous writes are
much slower under <a href="http://en.wikipedia.org/wiki/Ext4">ext4</a>
(LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second;
SQLite3's synchronous writes do not noticeably drop) due to ext4's different
handling of <span class="code">fsync</span> / <span class="code">msync</span>
calls. Even LevelDB's asynchronous write performance drops somewhat since it
spreads its storage across multiple files and issues
<span class="code">fsync</span> calls when switching to a new file.</p>

<h2>Acknowledgements</h2>
<p>Jeff Dean and Sanjay Ghemawat wrote LevelDB. Kevin Tseng wrote and compiled
these benchmarks. Mikio Hirabayashi, Scott Hess, and Gabor Cselle provided
help and advice.</p>
</body>
</html>
0
bitcoin/src/leveldb
bitcoin/src/leveldb/doc/index.md
leveldb
=======

_Jeff Dean, Sanjay Ghemawat_

The leveldb library provides a persistent key value store. Keys and values are
arbitrary byte arrays. The keys are ordered within the key value store
according to a user-specified comparator function.

## Opening A Database

A leveldb database has a name which corresponds to a file system directory.
All of the contents of the database are stored in this directory. The
following example shows how to open a database, creating it if necessary:

```c++
#include <cassert>
#include "leveldb/db.h"

leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = true;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
assert(status.ok());
...
```

If you want to raise an error if the database already exists, add the
following line before the `leveldb::DB::Open` call:

```c++
options.error_if_exists = true;
```

## Status

You may have noticed the `leveldb::Status` type above. Values of this type are
returned by most functions in leveldb that may encounter an error. You can
check if such a result is ok, and also print an associated error message:

```c++
leveldb::Status s = ...;
if (!s.ok()) cerr << s.ToString() << endl;
```

## Closing A Database

When you are done with a database, just delete the database object. Example:

```c++
... open the db as described above ...
... do something with db ...
delete db;
```

## Reads And Writes

The database provides Put, Delete, and Get methods to modify/query the
database. For example, the following code moves the value stored under key1 to
key2.

```c++
std::string value;
leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
if (s.ok()) s = db->Put(leveldb::WriteOptions(), key2, value);
if (s.ok()) s = db->Delete(leveldb::WriteOptions(), key1);
```

## Atomic Updates

Note that if the process dies after the Put of key2 but before the delete of
key1, the same value may be left stored under multiple keys. Such problems can
be avoided by using the `WriteBatch` class to atomically apply a set of
updates:

```c++
#include "leveldb/write_batch.h"
...
std::string value;
leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
if (s.ok()) {
  leveldb::WriteBatch batch;
  batch.Delete(key1);
  batch.Put(key2, value);
  s = db->Write(leveldb::WriteOptions(), &batch);
}
```

The `WriteBatch` holds a sequence of edits to be made to the database, and
these edits within the batch are applied in order. Note that we called Delete
before Put so that if key1 is identical to key2, we do not end up erroneously
dropping the value entirely.

Apart from its atomicity benefits, `WriteBatch` may also be used to speed up
bulk updates by placing lots of individual mutations into the same batch.

## Synchronous Writes

By default, each write to leveldb is asynchronous: it returns after pushing
the write from the process into the operating system. The transfer from
operating system memory to the underlying persistent storage happens
asynchronously. The sync flag can be turned on for a particular write to make
the write operation not return until the data being written has been pushed
all the way to persistent storage. (On Posix systems, this is implemented by
calling either `fsync(...)` or `fdatasync(...)` or `msync(..., MS_SYNC)`
before the write operation returns.)

```c++
leveldb::WriteOptions write_options;
write_options.sync = true;
db->Put(write_options, ...);
```

Asynchronous writes are often more than a thousand times as fast as
synchronous writes.
The downside of asynchronous writes is that a crash of the machine may cause
the last few updates to be lost. Note that a crash of just the writing process
(i.e., not a reboot) will not cause any loss since even when sync is false, an
update is pushed from the process memory into the operating system before it
is considered done.

Asynchronous writes can often be used safely. For example, when loading a
large amount of data into the database you can handle lost updates by
restarting the bulk load after a crash. A hybrid scheme is also possible where
every Nth write is synchronous, and in the event of a crash, the bulk load is
restarted just after the last synchronous write finished by the previous run.
(The synchronous write can update a marker that describes where to restart on
a crash.)

`WriteBatch` provides an alternative to asynchronous writes. Multiple updates
may be placed in the same WriteBatch and applied together using a synchronous
write (i.e., `write_options.sync` is set to true). The extra cost of the
synchronous write will be amortized across all of the writes in the batch.

## Concurrency

A database may only be opened by one process at a time. The leveldb
implementation acquires a lock from the operating system to prevent misuse.
Within a single process, the same `leveldb::DB` object may be safely shared by
multiple concurrent threads. I.e., different threads may write into or fetch
iterators or call Get on the same database without any external
synchronization (the leveldb implementation will automatically do the required
synchronization). However other objects (like Iterator and `WriteBatch`) may
require external synchronization. If two threads share such an object, they
must protect access to it using their own locking protocol. More details are
available in the public header files.

## Iteration

The following example demonstrates how to print all key,value pairs in a
database.

```c++
leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  cout << it->key().ToString() << ": " << it->value().ToString() << endl;
}
assert(it->status().ok());  // Check for any errors found during the scan
delete it;
```

The following variation shows how to process just the keys in the range
[start,limit):

```c++
for (it->Seek(start); it->Valid() && it->key().ToString() < limit; it->Next()) {
  ...
}
```

You can also process entries in reverse order. (Caveat: reverse iteration may
be somewhat slower than forward iteration.)

```c++
for (it->SeekToLast(); it->Valid(); it->Prev()) {
  ...
}
```

## Snapshots

Snapshots provide consistent read-only views over the entire state of the
key-value store. `ReadOptions::snapshot` may be non-NULL to indicate that a
read should operate on a particular version of the DB state. If
`ReadOptions::snapshot` is NULL, the read will operate on an implicit snapshot
of the current state.

Snapshots are created by the `DB::GetSnapshot()` method:

```c++
leveldb::ReadOptions options;
options.snapshot = db->GetSnapshot();
... apply some updates to db ...
leveldb::Iterator* iter = db->NewIterator(options);
... read using iter to view the state when the snapshot was created ...
delete iter;
db->ReleaseSnapshot(options.snapshot);
```

Note that when a snapshot is no longer needed, it should be released using the
`DB::ReleaseSnapshot` interface. This allows the implementation to get rid of
state that was being maintained just to support reading as of that snapshot.
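Snapshots work with point reads as well as iterators. A brief sketch, with
hypothetical keys `k1` and `k2` that are assumed to already exist in the
database:

```c++
const leveldb::Snapshot* snap = db->GetSnapshot();
leveldb::ReadOptions roptions;
roptions.snapshot = snap;

std::string v1, v2;
// Both Gets observe the same version of the database, even if another
// thread updates "k1" or "k2" between the two calls.
db->Get(roptions, "k1", &v1);
db->Get(roptions, "k2", &v2);

db->ReleaseSnapshot(snap);
```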
## Slice

The return value of the `it->key()` and `it->value()` calls above are
instances of the `leveldb::Slice` type. Slice is a simple structure that
contains a length and a pointer to an external byte array. Returning a Slice
is a cheaper alternative to returning a `std::string` since we do not need to
copy potentially large keys and values. In addition, leveldb methods do not
return null-terminated C-style strings since leveldb keys and values are
allowed to contain `'\0'` bytes.

C++ strings and null-terminated C-style strings can be easily converted to a
Slice:

```c++
leveldb::Slice s1 = "hello";

std::string str("world");
leveldb::Slice s2 = str;
```

A Slice can be easily converted back to a C++ string:

```c++
std::string str = s1.ToString();
assert(str == std::string("hello"));
```

Be careful when using Slices since it is up to the caller to ensure that the
external byte array into which the Slice points remains live while the Slice
is in use. For example, the following is buggy:

```c++
leveldb::Slice slice;
if (...) {
  std::string str = ...;
  slice = str;
}
Use(slice);
```

When the if statement goes out of scope, str will be destroyed and the backing
storage for slice will disappear.

## Comparators

The preceding examples used the default ordering function for key, which
orders bytes lexicographically. You can however supply a custom comparator
when opening a database. For example, suppose each database key consists of
two numbers and we should sort by the first number, breaking ties by the
second number. First, define a proper subclass of `leveldb::Comparator` that
expresses these rules:

```c++
class TwoPartComparator : public leveldb::Comparator {
 public:
  // Three-way comparison function:
  //   if a < b: negative result
  //   if a > b: positive result
  //   else: zero result
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const {
    int a1, a2, b1, b2;
    ParseKey(a, &a1, &a2);
    ParseKey(b, &b1, &b2);
    if (a1 < b1) return -1;
    if (a1 > b1) return +1;
    if (a2 < b2) return -1;
    if (a2 > b2) return +1;
    return 0;
  }

  // Ignore the following methods for now:
  const char* Name() const { return "TwoPartComparator"; }
  void FindShortestSeparator(std::string*, const leveldb::Slice&) const {}
  void FindShortSuccessor(std::string*) const {}
};
```

Now create a database using this custom comparator:

```c++
TwoPartComparator cmp;
leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = true;
options.comparator = &cmp;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
...
```

### Backwards compatibility

The result of the comparator's Name method is attached to the database when it
is created, and is checked on every subsequent database open. If the name
changes, the `leveldb::DB::Open` call will fail. Therefore, change the name if
and only if the new key format and comparison function are incompatible with
existing databases, and it is ok to discard the contents of all existing
databases.

You can however still gradually evolve your key format over time with a little
bit of pre-planning. For example, you could store a version number at the end
of each key (one byte should suffice for most uses). When you wish to switch
to a new key format (e.g., adding an optional third part to the keys processed
by `TwoPartComparator`), (a) keep the same comparator name (b) increment the
version number for new keys (c) change the comparator function so it uses the
version numbers found in the keys to decide how to interpret them.
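A sketch of step (c), in the style of the `TwoPartComparator` above. The
trailing version byte and the `ParseKeyV1`/`ParseKeyV2` helpers are
hypothetical, introduced only to illustrate the idea:

```c++
// Version-aware Compare: the last byte of each key carries its format
// version; a missing third part in old keys is treated as zero.
int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const {
  int a1, a2, a3, b1, b2, b3;
  if (a[a.size() - 1] == 1) { ParseKeyV1(a, &a1, &a2); a3 = 0; }
  else                      { ParseKeyV2(a, &a1, &a2, &a3); }
  if (b[b.size() - 1] == 1) { ParseKeyV1(b, &b1, &b2); b3 = 0; }
  else                      { ParseKeyV2(b, &b1, &b2, &b3); }
  if (a1 != b1) return (a1 < b1) ? -1 : +1;
  if (a2 != b2) return (a2 < b2) ? -1 : +1;
  if (a3 != b3) return (a3 < b3) ? -1 : +1;
  return 0;
}
```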
## Performance

Performance can be tuned by changing the default values of the types defined
in `include/leveldb/options.h`.

### Block size

leveldb groups adjacent keys together into the same block and such a block is
the unit of transfer to and from persistent storage. The default block size is
approximately 4096 uncompressed bytes. Applications that mostly do bulk scans
over the contents of the database may wish to increase this size. Applications
that do a lot of point reads of small values may wish to switch to a smaller
block size if performance measurements indicate an improvement. There isn't
much benefit in using blocks smaller than one kilobyte, or larger than a few
megabytes. Also note that compression will be more effective with larger block
sizes.

### Compression

Each block is individually compressed before being written to persistent
storage. Compression is on by default since the default compression method is
very fast, and is automatically disabled for uncompressible data. In rare
cases, applications may want to disable compression entirely, but should only
do so if benchmarks show a performance improvement:

```c++
leveldb::Options options;
options.compression = leveldb::kNoCompression;
... leveldb::DB::Open(options, name, ...) ....
```

### Cache

The contents of the database are stored in a set of files in the filesystem
and each file stores a sequence of compressed blocks. If options.block_cache
is non-NULL, it is used to cache frequently used uncompressed block contents.

```c++
#include "leveldb/cache.h"

leveldb::Options options;
options.block_cache = leveldb::NewLRUCache(100 * 1048576);  // 100MB cache
leveldb::DB* db;
leveldb::DB::Open(options, name, &db);
... use the db ...
delete db;
delete options.block_cache;
```

Note that the cache holds uncompressed data, and therefore it should be sized
according to application level data sizes, without any reduction from
compression. (Caching of compressed blocks is left to the operating system
buffer cache, or any custom Env implementation provided by the client.)

When performing a bulk read, the application may wish to disable caching so
that the data processed by the bulk read does not end up displacing most of
the cached contents. A per-iterator option can be used to achieve this:

```c++
leveldb::ReadOptions options;
options.fill_cache = false;
leveldb::Iterator* it = db->NewIterator(options);
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  ...
}
```

### Key Layout

Note that the unit of disk transfer and caching is a block. Adjacent keys
(according to the database sort order) will usually be placed in the same
block. Therefore the application can improve its performance by placing keys
that are accessed together near each other and placing infrequently used keys
in a separate region of the key space.

For example, suppose we are implementing a simple file system on top of
leveldb. The types of entries we might wish to store are:

    filename -> permission-bits, length, list of file_block_ids
    file_block_id -> data

We might want to prefix filename keys with one letter (say '/') and the
`file_block_id` keys with a different letter (say '0') so that scans over just
the metadata do not force us to fetch and cache bulky file contents.
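A sketch of that key scheme; the helper names and the fixed-width block-id
encoding are illustrative choices, not part of the library:

```c++
#include <cstdint>
#include <cstdio>
#include <string>

// Metadata keys sort together under '/', bulky data keys under '0',
// so a metadata scan never pulls file contents into the block cache.
std::string MetadataKey(const std::string& filename) {
  return "/" + filename;
}

std::string DataKey(uint64_t file_block_id) {
  char buf[16];
  // Fixed-width hex keeps numeric order identical to byte order.
  std::snprintf(buf, sizeof(buf), "%012llx",
                static_cast<unsigned long long>(file_block_id));
  return std::string("0") + buf;
}
```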
```c++ leveldb::Options options; options.filter_policy = NewBloomFilterPolicy(10); leveldb::DB* db; leveldb::DB::Open(options, "/tmp/testdb", &db); ... use the database ... delete db; delete options.filter_policy; ``` The preceding code associates a Bloom filter based filtering policy with the database. Bloom filter based filtering relies on keeping some number of bits of data in memory per key (in this case 10 bits per key since that is the argument we passed to `NewBloomFilterPolicy`). This filter will reduce the number of unnecessary disk reads needed for `Get()` calls by a factor of approximately 100. Increasing the bits per key will lead to a larger reduction at the cost of more memory usage. We recommend that applications whose working set does not fit in memory and that do a lot of random reads set a filter policy. If you are using a custom comparator, you should ensure that the filter policy you are using is compatible with your comparator. For example, consider a comparator that ignores trailing spaces when comparing keys. `NewBloomFilterPolicy` must not be used with such a comparator. Instead, the application should provide a custom filter policy that also ignores trailing spaces. For example: ```c++ class CustomFilterPolicy : public leveldb::FilterPolicy { private: const FilterPolicy* builtin_policy_; public: CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) {} ~CustomFilterPolicy() { delete builtin_policy_; } const char* Name() const { return "IgnoreTrailingSpacesFilter"; } void CreateFilter(const Slice* keys, int n, std::string* dst) const { // Use builtin bloom filter code after removing trailing spaces std::vector<Slice> trimmed(n); for (int i = 0; i < n; i++) { trimmed[i] = RemoveTrailingSpaces(keys[i]); } builtin_policy_->CreateFilter(&trimmed[0], n, dst); } }; ``` Advanced applications may provide a filter policy that does not use a Bloom filter but uses some other mechanism for summarizing a set of keys. See `leveldb/filter_policy.h` for details. ## Checksums leveldb associates checksums with all data it stores in the file system. There are two separate controls provided over how aggressively these checksums are verified: `ReadOptions::verify_checksums` may be set to true to force checksum verification of all data that is read from the file system on behalf of a particular read. By default, no such verification is done. `Options::paranoid_checks` may be set to true before opening a database to make the database implementation raise an error as soon as it detects an internal corruption. Depending on which portion of the database has been corrupted, the error may be raised when the database is opened, or later by another database operation. By default, paranoid checking is off so that the database can be used even if parts of its persistent storage have been corrupted. If a database is corrupted (perhaps it cannot be opened when paranoid checking is turned on), the `leveldb::RepairDB` function may be used to recover as much of the data as possible. ## Approximate Sizes The `GetApproximateSizes` method can be used to get the approximate number of bytes of file system space used by one or more key ranges.
```c++ leveldb::Range ranges[2]; ranges[0] = leveldb::Range("a", "c"); ranges[1] = leveldb::Range("x", "z"); uint64_t sizes[2]; leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes); ``` The preceding call will set `sizes[0]` to the approximate number of bytes of file system space used by the key range `[a..c)` and `sizes[1]` to the approximate number of bytes used by the key range `[x..z)`. ## Environment All file operations (and other operating system calls) issued by the leveldb implementation are routed through a `leveldb::Env` object. Sophisticated clients may wish to provide their own Env implementation to get better control. For example, an application may introduce artificial delays in the file IO paths to limit the impact of leveldb on other activities in the system. ```c++ class SlowEnv : public leveldb::Env { ... implementation of the Env interface ... }; SlowEnv env; leveldb::Options options; options.env = &env; Status s = leveldb::DB::Open(options, ...); ``` ## Porting leveldb may be ported to a new platform by providing platform specific implementations of the types/methods/functions exported by `leveldb/port/port.h`. See `leveldb/port/port_example.h` for more details. In addition, the new platform may need a new default `leveldb::Env` implementation. See `leveldb/util/env_posix.h` for an example. ## Other Information Details about the leveldb implementation may be found in the following documents: 1. [Implementation notes](impl.md) 2. [Format of an immutable Table file](table_format.md) 3. [Format of a log file](log_format.md)
0
bitcoin/src/leveldb
bitcoin/src/leveldb/doc/table_format.md
leveldb File format =================== <beginning_of_file> [data block 1] [data block 2] ... [data block N] [meta block 1] ... [meta block K] [metaindex block] [index block] [Footer] (fixed size; starts at file_size - sizeof(Footer)) <end_of_file> The file contains internal pointers. Each such pointer is called a BlockHandle and contains the following information: offset: varint64 size: varint64 See [varints](https://developers.google.com/protocol-buffers/docs/encoding#varints) for an explanation of varint64 format. 1. The sequence of key/value pairs in the file is stored in sorted order and partitioned into a sequence of data blocks. These blocks come one after another at the beginning of the file. Each data block is formatted according to the code in `block_builder.cc`, and then optionally compressed. 2. After the data blocks we store a bunch of meta blocks. The supported meta block types are described below. More meta block types may be added in the future. Each meta block is again formatted using `block_builder.cc` and then optionally compressed. 3. A "metaindex" block. It contains one entry for every other meta block where the key is the name of the meta block and the value is a BlockHandle pointing to that meta block. 4. An "index" block. This block contains one entry per data block, where the key is a string >= last key in that data block and before the first key in the successive data block. The value is the BlockHandle for the data block. 5. At the very end of the file is a fixed length footer that contains the BlockHandle of the metaindex and index blocks as well as a magic number. metaindex_handle: char[p]; // Block handle for metaindex index_handle: char[q]; // Block handle for index padding: char[40-p-q];// zeroed bytes to make fixed length // (40==2*BlockHandle::kMaxEncodedLength) magic: fixed64; // == 0xdb4775248b80fb57 (little-endian) ## "filter" Meta Block If a `FilterPolicy` was specified when the database was opened, a filter block is stored in each table. The "metaindex" block contains an entry that maps from `filter.<N>` to the BlockHandle for the filter block where `<N>` is the string returned by the filter policy's `Name()` method. The filter block stores a sequence of filters, where filter i contains the output of `FilterPolicy::CreateFilter()` on all keys that are stored in a block whose file offset falls within the range [ i*base ... (i+1)*base-1 ] Currently, "base" is 2KB. So for example, if blocks X and Y start in the range `[ 0KB .. 2KB-1 ]`, all of the keys in X and Y will be converted to a filter by calling `FilterPolicy::CreateFilter()`, and the resulting filter will be stored as the first filter in the filter block. The filter block is formatted as follows: [filter 0] [filter 1] [filter 2] ... [filter N-1] [offset of filter 0] : 4 bytes [offset of filter 1] : 4 bytes [offset of filter 2] : 4 bytes ... [offset of filter N-1] : 4 bytes [offset of beginning of offset array] : 4 bytes lg(base) : 1 byte The offset array at the end of the filter block allows efficient mapping from a data block offset to the corresponding filter. ## "stats" Meta Block This meta block contains a bunch of stats. The key is the name of the statistic. The value contains the statistic. TODO(postrelease): record following stats. data size index size key size (uncompressed) value size (uncompressed) number of entries number of data blocks
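As a concrete companion to the BlockHandle description above, here is a minimal, self-contained sketch (illustrative only; leveldb's real varint helpers live in `util/coding.h`) of decoding a BlockHandle's two varint64 fields per the protocol-buffers varint rules:

```c++
#include <cstddef>
#include <cstdint>

// Decode one varint64 starting at p, never reading past limit.
// Returns the position just past the varint, or nullptr on corruption.
const char* GetVarint64(const char* p, const char* limit, uint64_t* value) {
  uint64_t result = 0;
  for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) {
    uint64_t byte = static_cast<unsigned char>(*p++);
    result |= (byte & 0x7f) << shift;  // low 7 bits carry payload
    if ((byte & 0x80) == 0) {          // high bit clear: last byte
      *value = result;
      return p;
    }
  }
  return nullptr;  // ran off the end, or varint longer than 10 bytes
}

struct BlockHandle {
  uint64_t offset;
  uint64_t size;
};

// A BlockHandle is simply two varint64s back to back.
const char* DecodeBlockHandle(const char* p, const char* limit,
                              BlockHandle* handle) {
  p = GetVarint64(p, limit, &handle->offset);
  if (p != nullptr) p = GetVarint64(p, limit, &handle->size);
  return p;
}
```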
0
bitcoin/src/leveldb
bitcoin/src/leveldb/doc/log_format.md
leveldb Log format ================== The log file contents are a sequence of 32KB blocks. The only exception is that the tail of the file may contain a partial block. Each block consists of a sequence of records: block := record* trailer? record := checksum: uint32 // crc32c of type and data[] ; little-endian length: uint16 // little-endian type: uint8 // One of FULL, FIRST, MIDDLE, LAST data: uint8[length] A record never starts within the last six bytes of a block (since it won't fit). Any leftover bytes here form the trailer, which must consist entirely of zero bytes and must be skipped by readers. Aside: if exactly seven bytes are left in the current block, and a new non-zero length record is added, the writer must emit a FIRST record (which contains zero bytes of user data) to fill up the trailing seven bytes of the block and then emit all of the user data in subsequent blocks. More types may be added in the future. Some readers may skip record types they do not understand, while others may report that some data was skipped. FULL == 1 FIRST == 2 MIDDLE == 3 LAST == 4 The FULL record contains the contents of an entire user record. FIRST, MIDDLE, LAST are types used for user records that have been split into multiple fragments (typically because of block boundaries). FIRST is the type of the first fragment of a user record, LAST is the type of the last fragment of a user record, and MIDDLE is the type of all interior fragments of a user record. Example: consider a sequence of user records: A: length 1000 B: length 97270 C: length 8000 **A** will be stored as a FULL record in the first block. **B** will be split into three fragments: first fragment occupies the rest of the first block, second fragment occupies the entirety of the second block, and the third fragment occupies a prefix of the third block. This will leave six bytes free in the third block, which will be left empty as the trailer. **C** will be stored as a FULL record in the fourth block. ---- ## Some benefits over the recordio format: 1. We do not need any heuristics for resyncing - just go to the next block boundary and scan. If there is a corruption, skip to the next block. As a side-benefit, we do not get confused when part of the contents of one log file are embedded as a record inside another log file. 2. Splitting at approximate boundaries (e.g., for mapreduce) is simple: find the next block boundary and skip records until we hit a FULL or FIRST record. 3. We do not need extra buffering for large records. ## Some downsides compared to recordio format: 1. No packing of tiny records. This could be fixed by adding a new record type, so it is a shortcoming of the current implementation, not necessarily the format. 2. No compression. Again, this could be fixed by adding new record types.
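For concreteness, the following is a minimal sketch (illustrative, not the real implementation in `db/log_reader.cc`) of parsing the seven-byte record header described above; checksum verification is omitted and a little-endian host is assumed:

```c++
#include <cstddef>
#include <cstdint>
#include <cstring>

// Physical record types, matching the values defined above.
enum RecordType { kFullType = 1, kFirstType = 2, kMiddleType = 3, kLastType = 4 };

const int kBlockSize = 32768;
const int kHeaderSize = 4 + 2 + 1;  // checksum + length + type

struct Record {
  uint32_t checksum;  // crc32c of type and data[]
  uint16_t length;
  uint8_t type;
  const char* data;
};

// Parse one record starting at p, with avail bytes left in the block.
// Returns false at the block trailer (fewer than kHeaderSize bytes left)
// or if the stored length would overrun the block.
bool ParseRecord(const char* p, size_t avail, Record* rec) {
  if (avail < static_cast<size_t>(kHeaderSize)) return false;  // trailer
  std::memcpy(&rec->checksum, p, 4);     // little-endian fields copied
  std::memcpy(&rec->length, p + 4, 2);   // directly on a LE host
  rec->type = static_cast<uint8_t>(p[6]);
  if (static_cast<size_t>(kHeaderSize) + rec->length > avail) return false;
  rec->data = p + kHeaderSize;
  return true;
}
```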
0
bitcoin/src/leveldb
bitcoin/src/leveldb/port/port.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_PORT_PORT_H_ #define STORAGE_LEVELDB_PORT_PORT_H_ #include <string.h> // Include the appropriate platform specific file below. If you are // porting to a new platform, see "port_example.h" for documentation // of what the new port_<platform>.h file must provide. #if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS) #include "port/port_stdcxx.h" #elif defined(LEVELDB_PLATFORM_CHROMIUM) #include "port/port_chromium.h" #endif #endif // STORAGE_LEVELDB_PORT_PORT_H_
0
bitcoin/src/leveldb
bitcoin/src/leveldb/port/port_config.h.in
// Copyright 2017 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ #define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ // Define to 1 if you have a definition for fdatasync() in <unistd.h>. #if !defined(HAVE_FDATASYNC) #cmakedefine01 HAVE_FDATASYNC #endif // !defined(HAVE_FDATASYNC) // Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>. #if !defined(HAVE_FULLFSYNC) #cmakedefine01 HAVE_FULLFSYNC #endif // !defined(HAVE_FULLFSYNC) // Define to 1 if you have a definition for O_CLOEXEC in <fcntl.h>. #if !defined(HAVE_O_CLOEXEC) #cmakedefine01 HAVE_O_CLOEXEC #endif // !defined(HAVE_O_CLOEXEC) // Define to 1 if you have Google CRC32C. #if !defined(HAVE_CRC32C) #cmakedefine01 HAVE_CRC32C #endif // !defined(HAVE_CRC32C) // Define to 1 if you have Google Snappy. #if !defined(HAVE_SNAPPY) #cmakedefine01 HAVE_SNAPPY #endif // !defined(HAVE_SNAPPY) // Define to 1 if your processor stores words with the most significant byte // first (like Motorola and SPARC, unlike Intel and VAX). #if !defined(LEVELDB_IS_BIG_ENDIAN) #cmakedefine01 LEVELDB_IS_BIG_ENDIAN #endif // !defined(LEVELDB_IS_BIG_ENDIAN) #endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_
0
bitcoin/src/leveldb
bitcoin/src/leveldb/port/README.md
This directory contains interfaces and implementations that isolate the rest of the package from platform details. Code in the rest of the package includes "port.h" from this directory. "port.h" in turn includes a platform specific "port_<platform>.h" file that provides the platform specific implementation. See port_stdcxx.h for an example of what must be provided in a platform specific header file.
0
bitcoin/src/leveldb
bitcoin/src/leveldb/port/port_example.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // // This file contains the specification, but not the implementations, // of the types/operations/etc. that should be defined by a platform // specific port_<platform>.h file. Use this file as a reference for // how to port this package to a new platform. #ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_ #define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_ #include "port/thread_annotations.h" namespace leveldb { namespace port { // TODO(jorlow): Many of these belong more in the environment class rather than // here. We should try moving them and see if it affects perf. // The following boolean constant must be true on a little-endian machine // and false otherwise. static const bool kLittleEndian = true /* or some other expression */; // ------------------ Threading ------------------- // A Mutex represents an exclusive lock. class LOCKABLE Mutex { public: Mutex(); ~Mutex(); // Lock the mutex. Waits until other lockers have exited. // Will deadlock if the mutex is already locked by this thread. void Lock() EXCLUSIVE_LOCK_FUNCTION(); // Unlock the mutex. // REQUIRES: This mutex was locked by this thread. void Unlock() UNLOCK_FUNCTION(); // Optionally crash if this thread does not hold this mutex. // The implementation must be fast, especially if NDEBUG is // defined. The implementation is allowed to skip all checks. void AssertHeld() ASSERT_EXCLUSIVE_LOCK(); }; class CondVar { public: explicit CondVar(Mutex* mu); ~CondVar(); // Atomically release *mu and block on this condition variable until // either a call to SignalAll(), or a call to Signal() that picks // this thread to wake up. // REQUIRES: this thread holds *mu void Wait(); // If there are some threads waiting, wake up at least one of them. void Signal(); // Wake up all waiting threads. void SignalAll(); }; // ------------------ Compression ------------------- // Store the snappy compression of "input[0,input_length-1]" in *output. // Returns false if snappy is not supported by this port. bool Snappy_Compress(const char* input, size_t input_length, std::string* output); // If input[0,input_length-1] looks like a valid snappy compressed // buffer, store the size of the uncompressed data in *result and // return true. Else return false. bool Snappy_GetUncompressedLength(const char* input, size_t length, size_t* result); // Attempt to snappy uncompress input[0,input_length-1] into *output. // Returns true if successful, false if the input is invalid snappy // compressed data. // // REQUIRES: at least the first "n" bytes of output[] must be writable // where "n" is the result of a successful call to // Snappy_GetUncompressedLength. bool Snappy_Uncompress(const char* input_data, size_t input_length, char* output); // ------------------ Miscellaneous ------------------- // If heap profiling is not supported, returns false. // Else repeatedly calls (*func)(arg, data, n) and then returns true. // The concatenation of all "data[0,n-1]" fragments is the heap profile. bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg); // Extend the CRC to include the first n bytes of buf. // // Returns zero if the CRC cannot be extended using acceleration, else returns // the newly extended CRC value (which may also be zero). 
uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size); } // namespace port } // namespace leveldb #endif // STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
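// Usage sketch (illustrative; not part of the port specification above):
// the canonical pattern for CondVar::Wait() is to re-check the awaited
// condition in a loop while holding the Mutex, since the condition may
// have changed again by the time the waiter reacquires the lock:
//
//   mu.Lock();
//   while (!queue_has_work) {   // hypothetical predicate
//     cv.Wait();                // releases mu while blocked, then reacquires
//   }
//   ...consume work while still holding mu...
//   mu.Unlock();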
0
bitcoin/src/leveldb
bitcoin/src/leveldb/port/thread_annotations.h
// Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ #define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ // Use Clang's thread safety analysis annotations when available. In other // environments, the macros receive empty definitions. // Usage documentation: https://clang.llvm.org/docs/ThreadSafetyAnalysis.html #if !defined(THREAD_ANNOTATION_ATTRIBUTE__) #if defined(__clang__) #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) #else #define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op #endif #endif // !defined(THREAD_ANNOTATION_ATTRIBUTE__) #ifndef GUARDED_BY #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) #endif #ifndef PT_GUARDED_BY #define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) #endif #ifndef ACQUIRED_AFTER #define ACQUIRED_AFTER(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) #endif #ifndef ACQUIRED_BEFORE #define ACQUIRED_BEFORE(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) #endif #ifndef EXCLUSIVE_LOCKS_REQUIRED #define EXCLUSIVE_LOCKS_REQUIRED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) #endif #ifndef SHARED_LOCKS_REQUIRED #define SHARED_LOCKS_REQUIRED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) #endif #ifndef LOCKS_EXCLUDED #define LOCKS_EXCLUDED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) #endif #ifndef LOCK_RETURNED #define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) #endif #ifndef LOCKABLE #define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable) #endif #ifndef SCOPED_LOCKABLE #define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) #endif #ifndef EXCLUSIVE_LOCK_FUNCTION #define EXCLUSIVE_LOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) #endif #ifndef SHARED_LOCK_FUNCTION #define SHARED_LOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) #endif #ifndef EXCLUSIVE_TRYLOCK_FUNCTION #define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) #endif #ifndef SHARED_TRYLOCK_FUNCTION #define SHARED_TRYLOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) #endif #ifndef UNLOCK_FUNCTION #define UNLOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) #endif #ifndef NO_THREAD_SAFETY_ANALYSIS #define NO_THREAD_SAFETY_ANALYSIS \ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) #endif #ifndef ASSERT_EXCLUSIVE_LOCK #define ASSERT_EXCLUSIVE_LOCK(...) \ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) #endif #ifndef ASSERT_SHARED_LOCK #define ASSERT_SHARED_LOCK(...) \ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) #endif #endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
0
bitcoin/src/leveldb
bitcoin/src/leveldb/port/port_stdcxx.h
// Copyright (c) 2018 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ #define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ // port/port_config.h availability is automatically detected via __has_include // in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the // configuration detection. #if defined(LEVELDB_HAS_PORT_CONFIG_H) #if LEVELDB_HAS_PORT_CONFIG_H #include "port/port_config.h" #endif // LEVELDB_HAS_PORT_CONFIG_H #elif defined(__has_include) #if __has_include("port/port_config.h") #include "port/port_config.h" #endif // __has_include("port/port_config.h") #endif // defined(LEVELDB_HAS_PORT_CONFIG_H) #if HAVE_CRC32C #include <crc32c/crc32c.h> #endif // HAVE_CRC32C #if HAVE_SNAPPY #include <snappy.h> #endif // HAVE_SNAPPY #include <cassert> #include <condition_variable> // NOLINT #include <cstddef> #include <cstdint> #include <mutex> // NOLINT #include <string> #include "port/thread_annotations.h" namespace leveldb { namespace port { static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN; class CondVar; // Thinly wraps std::mutex. class LOCKABLE Mutex { public: Mutex() = default; ~Mutex() = default; Mutex(const Mutex&) = delete; Mutex& operator=(const Mutex&) = delete; void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); } void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); } void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {} private: friend class CondVar; std::mutex mu_; }; // Thinly wraps std::condition_variable. class CondVar { public: explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); } ~CondVar() = default; CondVar(const CondVar&) = delete; CondVar& operator=(const CondVar&) = delete; void Wait() { std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock); cv_.wait(lock); lock.release(); } void Signal() { cv_.notify_one(); } void SignalAll() { cv_.notify_all(); } private: std::condition_variable cv_; Mutex* const mu_; }; inline bool Snappy_Compress(const char* input, size_t length, std::string* output) { #if HAVE_SNAPPY output->resize(snappy::MaxCompressedLength(length)); size_t outlen; snappy::RawCompress(input, length, &(*output)[0], &outlen); output->resize(outlen); return true; #else // Silence compiler warnings about unused arguments. (void)input; (void)length; (void)output; #endif // HAVE_SNAPPY return false; } inline bool Snappy_GetUncompressedLength(const char* input, size_t length, size_t* result) { #if HAVE_SNAPPY return snappy::GetUncompressedLength(input, length, result); #else // Silence compiler warnings about unused arguments. (void)input; (void)length; (void)result; return false; #endif // HAVE_SNAPPY } inline bool Snappy_Uncompress(const char* input, size_t length, char* output) { #if HAVE_SNAPPY return snappy::RawUncompress(input, length, output); #else // Silence compiler warnings about unused arguments. (void)input; (void)length; (void)output; return false; #endif // HAVE_SNAPPY } inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) { // Silence compiler warnings about unused arguments. (void)func; (void)arg; return false; } inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) { #if HAVE_CRC32C return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size); #else // Silence compiler warnings about unused arguments. 
(void)crc; (void)buf; (void)size; return 0; #endif // HAVE_CRC32C } } // namespace port } // namespace leveldb #endif // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_
0
bitcoin/src/leveldb
bitcoin/src/leveldb/issues/issue200_test.cc
// Copyright (c) 2013 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // Test for issue 200: when iterator switches direction from backward // to forward, the current key can be yielded unexpectedly if a new // mutation has been added just before the current key. #include "leveldb/db.h" #include "util/testharness.h" namespace leveldb { class Issue200 {}; TEST(Issue200, Test) { // Get rid of any state from an old run. std::string dbpath = test::TmpDir() + "/leveldb_issue200_test"; DestroyDB(dbpath, Options()); DB* db; Options options; options.create_if_missing = true; ASSERT_OK(DB::Open(options, dbpath, &db)); WriteOptions write_options; ASSERT_OK(db->Put(write_options, "1", "b")); ASSERT_OK(db->Put(write_options, "2", "c")); ASSERT_OK(db->Put(write_options, "3", "d")); ASSERT_OK(db->Put(write_options, "4", "e")); ASSERT_OK(db->Put(write_options, "5", "f")); ReadOptions read_options; Iterator* iter = db->NewIterator(read_options); // Add an element that should not be reflected in the iterator. ASSERT_OK(db->Put(write_options, "25", "cd")); iter->Seek("5"); ASSERT_EQ(iter->key().ToString(), "5"); iter->Prev(); ASSERT_EQ(iter->key().ToString(), "4"); iter->Prev(); ASSERT_EQ(iter->key().ToString(), "3"); iter->Next(); ASSERT_EQ(iter->key().ToString(), "4"); iter->Next(); ASSERT_EQ(iter->key().ToString(), "5"); delete iter; delete db; DestroyDB(dbpath, options); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
0
bitcoin/src/leveldb
bitcoin/src/leveldb/issues/issue320_test.cc
// Copyright (c) 2019 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include <cstdint> #include <cstdlib> #include <iostream> #include <memory> #include <string> #include <vector> #include "leveldb/db.h" #include "leveldb/write_batch.h" #include "util/testharness.h" namespace leveldb { namespace { // Creates a random number in the range of [0, max). int GenerateRandomNumber(int max) { return std::rand() % max; } std::string CreateRandomString(int32_t index) { static const size_t len = 1024; char bytes[len]; size_t i = 0; while (i < 8) { bytes[i] = 'a' + ((index >> (4 * i)) & 0xf); ++i; } while (i < sizeof(bytes)) { bytes[i] = 'a' + GenerateRandomNumber(26); ++i; } return std::string(bytes, sizeof(bytes)); } } // namespace class Issue320 {}; TEST(Issue320, Test) { std::srand(0); bool delete_before_put = false; bool keep_snapshots = true; std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map( 10000); std::vector<Snapshot const*> snapshots(100, nullptr); DB* db; Options options; options.create_if_missing = true; std::string dbpath = test::TmpDir() + "/leveldb_issue320_test"; ASSERT_OK(DB::Open(options, dbpath, &db)); uint32_t target_size = 10000; uint32_t num_items = 0; uint32_t count = 0; std::string key; std::string value, old_value; WriteOptions writeOptions; ReadOptions readOptions; while (count < 200000) { if ((++count % 1000) == 0) { std::cout << "count: " << count << std::endl; } int index = GenerateRandomNumber(test_map.size()); WriteBatch batch; if (test_map[index] == nullptr) { num_items++; test_map[index].reset(new std::pair<std::string, std::string>( CreateRandomString(index), CreateRandomString(index))); batch.Put(test_map[index]->first, test_map[index]->second); } else { ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value)); if (old_value != test_map[index]->second) { std::cout << "ERROR incorrect value returned by Get" << std::endl; std::cout << " count=" << count << std::endl; std::cout << " old value=" << old_value << std::endl; std::cout << " test_map[index]->second=" << test_map[index]->second << std::endl; std::cout << " test_map[index]->first=" << test_map[index]->first << std::endl; std::cout << " index=" << index << std::endl; ASSERT_EQ(old_value, test_map[index]->second); } if (num_items >= target_size && GenerateRandomNumber(100) > 30) { batch.Delete(test_map[index]->first); test_map[index] = nullptr; --num_items; } else { test_map[index]->second = CreateRandomString(index); if (delete_before_put) batch.Delete(test_map[index]->first); batch.Put(test_map[index]->first, test_map[index]->second); } } ASSERT_OK(db->Write(writeOptions, &batch)); if (keep_snapshots && GenerateRandomNumber(10) == 0) { int i = GenerateRandomNumber(snapshots.size()); if (snapshots[i] != nullptr) { db->ReleaseSnapshot(snapshots[i]); } snapshots[i] = db->GetSnapshot(); } } for (Snapshot const* snapshot : snapshots) { if (snapshot) { db->ReleaseSnapshot(snapshot); } } delete db; DestroyDB(dbpath, options); } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
0
bitcoin/src/leveldb
bitcoin/src/leveldb/issues/issue178_test.cc
// Copyright (c) 2013 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // Test for issue 178: a manual compaction causes deleted data to reappear. #include <cstdlib> #include <iostream> #include <sstream> #include "leveldb/db.h" #include "leveldb/write_batch.h" #include "util/testharness.h" namespace { const int kNumKeys = 1100000; std::string Key1(int i) { char buf[100]; snprintf(buf, sizeof(buf), "my_key_%d", i); return buf; } std::string Key2(int i) { return Key1(i) + "_xxx"; } class Issue178 {}; TEST(Issue178, Test) { // Get rid of any state from an old run. std::string dbpath = leveldb::test::TmpDir() + "/leveldb_cbug_test"; DestroyDB(dbpath, leveldb::Options()); // Open database. Disable compression since it affects the creation // of layers and the code below is trying to test against a very // specific scenario. leveldb::DB* db; leveldb::Options db_options; db_options.create_if_missing = true; db_options.compression = leveldb::kNoCompression; ASSERT_OK(leveldb::DB::Open(db_options, dbpath, &db)); // create first key range leveldb::WriteBatch batch; for (size_t i = 0; i < kNumKeys; i++) { batch.Put(Key1(i), "value for range 1 key"); } ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); // create second key range batch.Clear(); for (size_t i = 0; i < kNumKeys; i++) { batch.Put(Key2(i), "value for range 2 key"); } ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); // delete second key range batch.Clear(); for (size_t i = 0; i < kNumKeys; i++) { batch.Delete(Key2(i)); } ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); // compact database std::string start_key = Key1(0); std::string end_key = Key1(kNumKeys - 1); leveldb::Slice least(start_key.data(), start_key.size()); leveldb::Slice greatest(end_key.data(), end_key.size()); // commenting out the line below causes the example to work correctly db->CompactRange(&least, &greatest); // count the keys leveldb::Iterator* iter = db->NewIterator(leveldb::ReadOptions()); size_t num_keys = 0; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { num_keys++; } delete iter; ASSERT_EQ(kNumKeys, num_keys) << "Bad number of keys"; // close database delete db; DestroyDB(dbpath, leveldb::Options()); } } // anonymous namespace int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
0
bitcoin/src/leveldb/helpers
bitcoin/src/leveldb/helpers/memenv/memenv.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "helpers/memenv/memenv.h" #include <string.h> #include <limits> #include <map> #include <string> #include <vector> #include "leveldb/env.h" #include "leveldb/status.h" #include "port/port.h" #include "port/thread_annotations.h" #include "util/mutexlock.h" namespace leveldb { namespace { class FileState { public: // FileStates are reference counted. The initial reference count is zero // and the caller must call Ref() at least once. FileState() : refs_(0), size_(0) {} // No copying allowed. FileState(const FileState&) = delete; FileState& operator=(const FileState&) = delete; // Increase the reference count. void Ref() { MutexLock lock(&refs_mutex_); ++refs_; } // Decrease the reference count. Delete if this is the last reference. void Unref() { bool do_delete = false; { MutexLock lock(&refs_mutex_); --refs_; assert(refs_ >= 0); if (refs_ <= 0) { do_delete = true; } } if (do_delete) { delete this; } } uint64_t Size() const { MutexLock lock(&blocks_mutex_); return size_; } void Truncate() { MutexLock lock(&blocks_mutex_); for (char*& block : blocks_) { delete[] block; } blocks_.clear(); size_ = 0; } Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const { MutexLock lock(&blocks_mutex_); if (offset > size_) { return Status::IOError("Offset greater than file size."); } const uint64_t available = size_ - offset; if (n > available) { n = static_cast<size_t>(available); } if (n == 0) { *result = Slice(); return Status::OK(); } assert(offset / kBlockSize <= std::numeric_limits<size_t>::max()); size_t block = static_cast<size_t>(offset / kBlockSize); size_t block_offset = offset % kBlockSize; size_t bytes_to_copy = n; char* dst = scratch; while (bytes_to_copy > 0) { size_t avail = kBlockSize - block_offset; if (avail > bytes_to_copy) { avail = bytes_to_copy; } memcpy(dst, blocks_[block] + block_offset, avail); bytes_to_copy -= avail; dst += avail; block++; block_offset = 0; } *result = Slice(scratch, n); return Status::OK(); } Status Append(const Slice& data) { const char* src = data.data(); size_t src_len = data.size(); MutexLock lock(&blocks_mutex_); while (src_len > 0) { size_t avail; size_t offset = size_ % kBlockSize; if (offset != 0) { // There is some room in the last block. avail = kBlockSize - offset; } else { // No room in the last block; push new one. blocks_.push_back(new char[kBlockSize]); avail = kBlockSize; } if (avail > src_len) { avail = src_len; } memcpy(blocks_.back() + offset, src, avail); src_len -= avail; src += avail; size_ += avail; } return Status::OK(); } private: enum { kBlockSize = 8 * 1024 }; // Private since only Unref() should be used to delete it. 
~FileState() { Truncate(); } port::Mutex refs_mutex_; int refs_ GUARDED_BY(refs_mutex_); mutable port::Mutex blocks_mutex_; std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_); uint64_t size_ GUARDED_BY(blocks_mutex_); }; class SequentialFileImpl : public SequentialFile { public: explicit SequentialFileImpl(FileState* file) : file_(file), pos_(0) { file_->Ref(); } ~SequentialFileImpl() override { file_->Unref(); } Status Read(size_t n, Slice* result, char* scratch) override { Status s = file_->Read(pos_, n, result, scratch); if (s.ok()) { pos_ += result->size(); } return s; } Status Skip(uint64_t n) override { if (pos_ > file_->Size()) { return Status::IOError("pos_ > file_->Size()"); } const uint64_t available = file_->Size() - pos_; if (n > available) { n = available; } pos_ += n; return Status::OK(); } virtual std::string GetName() const override { return "[memenv]"; } private: FileState* file_; uint64_t pos_; }; class RandomAccessFileImpl : public RandomAccessFile { public: explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); } ~RandomAccessFileImpl() override { file_->Unref(); } Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const override { return file_->Read(offset, n, result, scratch); } virtual std::string GetName() const override { return "[memenv]"; } private: FileState* file_; }; class WritableFileImpl : public WritableFile { public: WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); } ~WritableFileImpl() override { file_->Unref(); } Status Append(const Slice& data) override { return file_->Append(data); } Status Close() override { return Status::OK(); } Status Flush() override { return Status::OK(); } Status Sync() override { return Status::OK(); } virtual std::string GetName() const override { return "[memenv]"; } private: FileState* file_; }; class NoOpLogger : public Logger { public: void Logv(const char* format, va_list ap) override {} }; class InMemoryEnv : public EnvWrapper { public: explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {} ~InMemoryEnv() override { for (const auto& kvp : file_map_) { kvp.second->Unref(); } } // Partial implementation of the Env interface. Status NewSequentialFile(const std::string& fname, SequentialFile** result) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { *result = nullptr; return Status::IOError(fname, "File not found"); } *result = new SequentialFileImpl(file_map_[fname]); return Status::OK(); } Status NewRandomAccessFile(const std::string& fname, RandomAccessFile** result) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { *result = nullptr; return Status::IOError(fname, "File not found"); } *result = new RandomAccessFileImpl(file_map_[fname]); return Status::OK(); } Status NewWritableFile(const std::string& fname, WritableFile** result) override { MutexLock lock(&mutex_); FileSystem::iterator it = file_map_.find(fname); FileState* file; if (it == file_map_.end()) { // File is not currently open. 
file = new FileState(); file->Ref(); file_map_[fname] = file; } else { file = it->second; file->Truncate(); } *result = new WritableFileImpl(file); return Status::OK(); } Status NewAppendableFile(const std::string& fname, WritableFile** result) override { MutexLock lock(&mutex_); FileState** sptr = &file_map_[fname]; FileState* file = *sptr; if (file == nullptr) { file = new FileState(); file->Ref(); *sptr = file; // Register the new file in the map so later opens find it. } *result = new WritableFileImpl(file); return Status::OK(); } bool FileExists(const std::string& fname) override { MutexLock lock(&mutex_); return file_map_.find(fname) != file_map_.end(); } Status GetChildren(const std::string& dir, std::vector<std::string>* result) override { MutexLock lock(&mutex_); result->clear(); for (const auto& kvp : file_map_) { const std::string& filename = kvp.first; if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' && Slice(filename).starts_with(Slice(dir))) { result->push_back(filename.substr(dir.size() + 1)); } } return Status::OK(); } void DeleteFileInternal(const std::string& fname) EXCLUSIVE_LOCKS_REQUIRED(mutex_) { if (file_map_.find(fname) == file_map_.end()) { return; } file_map_[fname]->Unref(); file_map_.erase(fname); } Status DeleteFile(const std::string& fname) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { return Status::IOError(fname, "File not found"); } DeleteFileInternal(fname); return Status::OK(); } Status CreateDir(const std::string& dirname) override { return Status::OK(); } Status DeleteDir(const std::string& dirname) override { return Status::OK(); } Status GetFileSize(const std::string& fname, uint64_t* file_size) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { return Status::IOError(fname, "File not found"); } *file_size = file_map_[fname]->Size(); return Status::OK(); } Status RenameFile(const std::string& src, const std::string& target) override { MutexLock lock(&mutex_); if (file_map_.find(src) == file_map_.end()) { return Status::IOError(src, "File not found"); } DeleteFileInternal(target); file_map_[target] = file_map_[src]; file_map_.erase(src); return Status::OK(); } Status LockFile(const std::string& fname, FileLock** lock) override { *lock = new FileLock; return Status::OK(); } Status UnlockFile(FileLock* lock) override { delete lock; return Status::OK(); } Status GetTestDirectory(std::string* path) override { *path = "/test"; return Status::OK(); } Status NewLogger(const std::string& fname, Logger** result) override { *result = new NoOpLogger; return Status::OK(); } private: // Map from filenames to FileState objects, representing a simple file system. typedef std::map<std::string, FileState*> FileSystem; port::Mutex mutex_; FileSystem file_map_ GUARDED_BY(mutex_); }; } // namespace Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); } } // namespace leveldb
0
bitcoin/src/leveldb/helpers
bitcoin/src/leveldb/helpers/memenv/memenv_test.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "helpers/memenv/memenv.h" #include <string> #include <vector> #include "db/db_impl.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "util/testharness.h" namespace leveldb { class MemEnvTest { public: MemEnvTest() : env_(NewMemEnv(Env::Default())) {} ~MemEnvTest() { delete env_; } Env* env_; }; TEST(MemEnvTest, Basics) { uint64_t file_size; WritableFile* writable_file; std::vector<std::string> children; ASSERT_OK(env_->CreateDir("/dir")); // Check that the directory is empty. ASSERT_TRUE(!env_->FileExists("/dir/non_existent")); ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok()); ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(0, children.size()); // Create a file. ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(0, file_size); delete writable_file; // Check that the file exists. ASSERT_TRUE(env_->FileExists("/dir/f")); ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(0, file_size); ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(1, children.size()); ASSERT_EQ("f", children[0]); // Write to the file. ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_OK(writable_file->Append("abc")); delete writable_file; // Check that append works. ASSERT_OK(env_->NewAppendableFile("/dir/f", &writable_file)); ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(3, file_size); ASSERT_OK(writable_file->Append("hello")); delete writable_file; // Check for expected size. ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(8, file_size); // Check that renaming works. ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok()); ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/f")); ASSERT_TRUE(env_->FileExists("/dir/g")); ASSERT_OK(env_->GetFileSize("/dir/g", &file_size)); ASSERT_EQ(8, file_size); // Check that opening non-existent file fails. SequentialFile* seq_file; RandomAccessFile* rand_file; ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file).ok()); ASSERT_TRUE(!seq_file); ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file).ok()); ASSERT_TRUE(!rand_file); // Check that deleting works. ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok()); ASSERT_OK(env_->DeleteFile("/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/g")); ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(0, children.size()); ASSERT_OK(env_->DeleteDir("/dir")); } TEST(MemEnvTest, ReadWrite) { WritableFile* writable_file; SequentialFile* seq_file; RandomAccessFile* rand_file; Slice result; char scratch[100]; ASSERT_OK(env_->CreateDir("/dir")); ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_OK(writable_file->Append("hello ")); ASSERT_OK(writable_file->Append("world")); delete writable_file; // Read sequentially. ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); ASSERT_OK(seq_file->Skip(1)); ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world". ASSERT_EQ(0, result.compare("world")); ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF. 
ASSERT_EQ(0, result.size()); ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file. ASSERT_OK(seq_file->Read(1000, &result, scratch)); ASSERT_EQ(0, result.size()); delete seq_file; // Random reads. ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file)); ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world". ASSERT_EQ(0, result.compare("world")); ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d". ASSERT_EQ(0, result.compare("d")); // Too high offset. ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok()); delete rand_file; } TEST(MemEnvTest, Locks) { FileLock* lock; // These are no-ops, but we test they return success. ASSERT_OK(env_->LockFile("some file", &lock)); ASSERT_OK(env_->UnlockFile(lock)); } TEST(MemEnvTest, Misc) { std::string test_dir; ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_TRUE(!test_dir.empty()); WritableFile* writable_file; ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file)); // These are no-ops, but we test they return success. ASSERT_OK(writable_file->Sync()); ASSERT_OK(writable_file->Flush()); ASSERT_OK(writable_file->Close()); delete writable_file; } TEST(MemEnvTest, LargeWrite) { const size_t kWriteSize = 300 * 1024; char* scratch = new char[kWriteSize * 2]; std::string write_data; for (size_t i = 0; i < kWriteSize; ++i) { write_data.append(1, static_cast<char>(i)); } WritableFile* writable_file; ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_OK(writable_file->Append("foo")); ASSERT_OK(writable_file->Append(write_data)); delete writable_file; SequentialFile* seq_file; Slice result; ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo". ASSERT_EQ(0, result.compare("foo")); size_t read = 0; std::string read_data; while (read < kWriteSize) { ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch)); read_data.append(result.data(), result.size()); read += result.size(); } ASSERT_TRUE(write_data == read_data); delete seq_file; delete[] scratch; } TEST(MemEnvTest, OverwriteOpenFile) { const char kWrite1Data[] = "Write #1 data"; const size_t kFileDataLen = sizeof(kWrite1Data) - 1; const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat"; ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName)); RandomAccessFile* rand_file; ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file)); const char kWrite2Data[] = "Write #2 data"; ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName)); // Verify that overwriting an open file will result in the new file data // being read from files opened before the write. 
Slice result; char scratch[kFileDataLen]; ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch)); ASSERT_EQ(0, result.compare(kWrite2Data)); delete rand_file; } TEST(MemEnvTest, DBTest) { Options options; options.create_if_missing = true; options.env = env_; DB* db; const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")}; const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")}; ASSERT_OK(DB::Open(options, "/dir/db", &db)); for (size_t i = 0; i < 3; ++i) { ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i])); } for (size_t i = 0; i < 3; ++i) { std::string res; ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_TRUE(res == vals[i]); } Iterator* iterator = db->NewIterator(ReadOptions()); iterator->SeekToFirst(); for (size_t i = 0; i < 3; ++i) { ASSERT_TRUE(iterator->Valid()); ASSERT_TRUE(keys[i] == iterator->key()); ASSERT_TRUE(vals[i] == iterator->value()); iterator->Next(); } ASSERT_TRUE(!iterator->Valid()); delete iterator; DBImpl* dbi = reinterpret_cast<DBImpl*>(db); ASSERT_OK(dbi->TEST_CompactMemTable()); for (size_t i = 0; i < 3; ++i) { std::string res; ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_TRUE(res == vals[i]); } delete db; } } // namespace leveldb int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
0
bitcoin/src/leveldb/helpers
bitcoin/src/leveldb/helpers/memenv/memenv.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. #ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_ #define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_ #include "leveldb/export.h" namespace leveldb { class Env; // Returns a new environment that stores its data in memory and delegates // all non-file-storage tasks to base_env. The caller must delete the result // when it is no longer needed. // *base_env must remain live while the result is in use. LEVELDB_EXPORT Env* NewMemEnv(Env* base_env); } // namespace leveldb #endif // STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
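// Usage sketch (illustrative; mirrors the DBTest case in memenv_test.cc):
//
//   leveldb::Env* env = leveldb::NewMemEnv(leveldb::Env::Default());
//   leveldb::Options options;
//   options.create_if_missing = true;
//   options.env = env;
//   leveldb::DB* db;
//   leveldb::Status s = leveldb::DB::Open(options, "/dir/db", &db);
//   ...read and write through db; all files live only in memory...
//   delete db;
//   delete env;  // the Env must outlive the DB and any files opened via it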
0
bitcoin/src
bitcoin/src/bench/descriptors.cpp
// Copyright (c) 2019-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <key.h> #include <pubkey.h> #include <script/descriptor.h> #include <string> #include <utility> static void ExpandDescriptor(benchmark::Bench& bench) { ECC_Start(); const auto desc_str = "sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))"; const std::pair<int64_t, int64_t> range = {0, 1000}; FlatSigningProvider provider; std::string error; auto desc = Parse(desc_str, provider, error); bench.run([&] { for (int i = range.first; i <= range.second; ++i) { std::vector<CScript> scripts; bool success = desc->Expand(i, provider, scripts, provider); assert(success); } }); ECC_Stop(); } BENCHMARK(ExpandDescriptor, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/nanobench.h
// __ _ _______ __ _ _____ ______ _______ __ _ _______ _ _ // | \ | |_____| | \ | | | |_____] |______ | \ | | |_____| // | \_| | | | \_| |_____| |_____] |______ | \_| |_____ | | // // Microbenchmark framework for C++11/14/17/20 // https://github.com/martinus/nanobench // // Licensed under the MIT License <http://opensource.org/licenses/MIT>. // SPDX-License-Identifier: MIT // Copyright (c) 2019-2023 Martin Leitner-Ankerl <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #ifndef ANKERL_NANOBENCH_H_INCLUDED #define ANKERL_NANOBENCH_H_INCLUDED // see https://semver.org/ #define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes #define ANKERL_NANOBENCH_VERSION_MINOR 3 // backwards-compatible changes #define ANKERL_NANOBENCH_VERSION_PATCH 11 // backwards-compatible bug fixes /////////////////////////////////////////////////////////////////////////////////////////////////// // public facing api - as minimal as possible /////////////////////////////////////////////////////////////////////////////////////////////////// #include <chrono> // high_resolution_clock #include <cstring> // memcpy #include <iosfwd> // for std::ostream* custom output target in Config #include <string> // all names #include <unordered_map> // holds context information of results #include <vector> // holds all results #define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x() #define ANKERL_NANOBENCH_PRIVATE_CXX() __cplusplus #define ANKERL_NANOBENCH_PRIVATE_CXX98() 199711L #define ANKERL_NANOBENCH_PRIVATE_CXX11() 201103L #define ANKERL_NANOBENCH_PRIVATE_CXX14() 201402L #define ANKERL_NANOBENCH_PRIVATE_CXX17() 201703L #if ANKERL_NANOBENCH(CXX) >= ANKERL_NANOBENCH(CXX17) # define ANKERL_NANOBENCH_PRIVATE_NODISCARD() [[nodiscard]] #else # define ANKERL_NANOBENCH_PRIVATE_NODISCARD() #endif #if defined(__clang__) # define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() \ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wpadded\"") # define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() _Pragma("clang diagnostic pop") #else # define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() # define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() #endif #if defined(__GNUC__) # define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Weffc++\"") # define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() _Pragma("GCC diagnostic pop") #else # define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() # define 
ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() #endif #if defined(ANKERL_NANOBENCH_LOG_ENABLED) # include <iostream> # define ANKERL_NANOBENCH_LOG(x) \ do { \ std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl; \ } while (0) #else # define ANKERL_NANOBENCH_LOG(x) \ do { \ } while (0) #endif #define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0 #if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS) # include <linux/version.h> # if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) // PERF_COUNT_HW_REF_CPU_CYCLES only available since kernel 3.3 // PERF_FLAG_FD_CLOEXEC since kernel 3.14 # undef ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS # define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 1 # endif #endif #if defined(__clang__) # define ANKERL_NANOBENCH_NO_SANITIZE(...) __attribute__((no_sanitize(__VA_ARGS__))) #else # define ANKERL_NANOBENCH_NO_SANITIZE(...) #endif #if defined(_MSC_VER) # define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __declspec(noinline) #else # define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __attribute__((noinline)) #endif // workaround missing "is_trivially_copyable" in g++ < 5.0 // See https://stackoverflow.com/a/31798726/48181 #if defined(__GNUC__) && __GNUC__ < 5 # define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) #else # define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value #endif // noexcept may be missing for std::string. // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58265 #define ANKERL_NANOBENCH_PRIVATE_NOEXCEPT_STRING_MOVE() std::is_nothrow_move_assignable<std::string>::value // declarations /////////////////////////////////////////////////////////////////////////////////// namespace ankerl { namespace nanobench { using Clock = std::conditional<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock, std::chrono::steady_clock>::type; class Bench; struct Config; class Result; class Rng; class BigO; /** * @brief Renders output from a mustache-like template and benchmark results. * * The templating facility here is heavily inspired by [mustache - logic-less templates](https://mustache.github.io/). * It adds a few more features that are necessary to get all of the captured data out of nanobench. Please read the * excellent [mustache manual](https://mustache.github.io/mustache.5.html) to see what this is all about. * * nanobench output has two nested layers, *result* and *measurement*. Here is a hierarchy of the allowed tags: * * * `{{#result}}` Marks the begin of the result layer. Whatever comes after this will be instantiated as often as * a benchmark result is available. Within it, you can use these tags: * * * `{{title}}` See Bench::title. * * * `{{name}}` Benchmark name, usually directly provided with Bench::run, but can also be set with Bench::name. * * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::unit. * * * `{{batch}}` Batch size, see Bench::batch. * * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN. * * * `{{epochs}}` Number of epochs, see Bench::epochs. * * * `{{clockResolution}}` Accuracy of the clock, i.e. what's the smallest time possible to measure with the clock. * For modern systems, this can be around 20 ns. This value is automatically determined by nanobench at the first * benchmark that is run, and used as a static variable throughout the application's runtime. * * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. 
See Bench::clockResolutionMultiple. * This is the target runtime for each measurement (epoch). That means the more accurate your clock is, the faster * will be the benchmark. Basing the measurement's runtime on the clock resolution is the main reason why nanobench is so fast. * * * `{{maxEpochTime}}` Configuration for a maximum time each measurement (epoch) is allowed to take. Note that at least * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime. * * * `{{minEpochTime}}` Minimum epoch time, defaults to 1ms. See Bench::minEpochTime. * * * `{{minEpochIterations}}` See Bench::minEpochIterations. * * * `{{epochIterations}}` See Bench::epochIterations. * * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup. * * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative. * * * `{{context(variableName)}}` See Bench::context. * * Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations * are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`. If performance counters * are available (currently only on current Linux systems), you also have `pagefaults`, `cpucycles`, * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. All the measures (except `iterations`) are * provided for a single iteration (so `elapsed` is the time a single iteration took). The following tags are available: * * * `{{median(<name>)}}` Calculate median of a measurement data set, e.g. `{{median(elapsed)}}`. * * * `{{average(<name>)}}` Average (mean) calculation. * * * `{{medianAbsolutePercentError(<name>)}}` Calculates MdAPE, the Median Absolute Percentage Error. The MdAPE is an excellent * metric for the variation of measurements. It is more robust to outliers than the * [Mean absolute percentage error (M-APE)](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error). * @f[ * \mathrm{MdAPE}(e) = \mathrm{med}\{| \frac{e_i - \mathrm{med}\{e\}}{e_i}| \} * @f] * E.g. for *elapsed*: First, @f$ \mathrm{med}\{e\} @f$ calculates the median by sorting and then taking the middle element * of all *elapsed* measurements. This is used to calculate the absolute percentage * error to this median for each measurement, as in @f$ | \frac{e_i - \mathrm{med}\{e\}}{e_i}| @f$. All these results * are sorted, and the middle value is chosen as the median absolute percent error. * * This measurement is a bit hard to interpret, but it is very robust against outliers. E.g. a value of 5% means that half of the * measurements deviate less than 5% from the median, and the other deviate more than 5% from the median. * * * `{{sum(<name>)}}` Sum of all the measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations * measured in this benchmark. * * * `{{minimum(<name>)}}` Minimum of all measurements. * * * `{{maximum(<name>)}}` Maximum of all measurements. * * * `{{sumProduct(<first>, <second>)}}` Calculates the sum of the products of corresponding measures: * @f[ * \mathrm{sumProduct}(a,b) = \sum_{i=1}^{n}a_i\cdot b_i * @f] * E.g. to calculate total runtime of the benchmark, you multiply iterations with elapsed time for each measurement, and * sum these results up: * `{{sumProduct(iterations, elapsed)}}`. * * * `{{#measurement}}` To access individual measurement results, open the begin tag for measurements. * * * `{{elapsed}}` Average elapsed wall clock time per iteration, in seconds. 
* * * `{{iterations}}` Number of iterations in the measurement. The number of iterations will fluctuate due * to some applied randomness, to enhance accuracy. * * * `{{pagefaults}}` Average number of pagefaults per iteration. * * * `{{cpucycles}}` Average number of CPU cycles processed per iteration. * * * `{{contextswitches}}` Average number of context switches per iteration. * * * `{{instructions}}` Average number of retired instructions per iteration. * * * `{{branchinstructions}}` Average number of branches executed per iteration. * * * `{{branchmisses}}` Average number of branches that were missed per iteration. * * * `{{/measurement}}` Ends the measurement tag. * * * `{{/result}}` Marks the end of the result layer. This is the end marker for the template part that will be instantiated * for each benchmark result. * * * For the layer tags *result* and *measurement* you can additionally use these special markers: * * * ``{{#-first}}`` - Begin marker of a template that will be instantiated *only for the first* entry in the layer. Use is only * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-first}}``. * * * ``{{^-first}}`` - Begin marker of a template that will be instantiated *for each except the first* entry in the layer. This * is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer. * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``. * * * ``{{/-first}}`` - End marker for either ``{{#-first}}`` or ``{{^-first}}``. * * * ``{{#-last}}`` - Begin marker of a template that will be instantiated *only for the last* entry in the layer. Use is only * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between * ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-last}}``. * * * ``{{^-last}}`` - Begin marker of a template that will be instantiated *for each except the last* entry in the layer. This * is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer. * So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``. * * * ``{{/-last}}`` - End marker for either ``{{#-last}}`` or ``{{^-last}}``. * @verbatim embed:rst For an overview of all the possible data you can get out of nanobench, please see the tutorial at :ref:`tutorial-template-json`. The templates that ship with nanobench are: * :cpp:func:`templates::csv() <ankerl::nanobench::templates::csv()>` * :cpp:func:`templates::json() <ankerl::nanobench::templates::json()>` * :cpp:func:`templates::htmlBoxplot() <ankerl::nanobench::templates::htmlBoxplot()>` * :cpp:func:`templates::pyperf() <ankerl::nanobench::templates::pyperf()>` @endverbatim * * @param mustacheTemplate The template. * @param bench Benchmark, containing all the results. * @param out Output for the generated output. */ void render(char const* mustacheTemplate, Bench const& bench, std::ostream& out); void render(std::string const& mustacheTemplate, Bench const& bench, std::ostream& out); /** * Same as render(char const* mustacheTemplate, Bench const& bench, std::ostream& out), but for when * you only have results available. * * @param mustacheTemplate The template. * @param results All the results to be used for rendering. 
* @param out Output for the generated output. */ void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out); void render(std::string const& mustacheTemplate, std::vector<Result> const& results, std::ostream& out); // Contains mustache-like templates namespace templates { /*! @brief CSV data for the benchmark results. Generates a comma-separated values dataset. First line is the header, each following line is a summary of each benchmark run. @verbatim embed:rst See the tutorial at :ref:`tutorial-template-csv` for an example. @endverbatim */ char const* csv() noexcept; /*! @brief HTML output that uses plotly to generate an interactive boxplot chart. See the tutorial for an example output. The output uses only the elapsed wall clock time, and displays each epoch as a single dot. @verbatim embed:rst See the tutorial at :ref:`tutorial-template-html` for an example. @endverbatim @see also ankerl::nanobench::render() */ char const* htmlBoxplot() noexcept; /*! @brief Output in pyperf-compatible JSON format, which can be used for further analysis. @verbatim embed:rst See the tutorial at :ref:`tutorial-template-pyperf` for an example of how to further analyze the output. @endverbatim */ char const* pyperf() noexcept; /*! @brief Template to generate JSON data. The generated JSON data contains *all* data that has been generated. All times are as double values, in seconds. The output can get quite large. @verbatim embed:rst See the tutorial at :ref:`tutorial-template-json` for an example. @endverbatim */ char const* json() noexcept; } // namespace templates namespace detail { template <typename T> struct PerfCountSet; class IterationLogic; class PerformanceCounters; #if ANKERL_NANOBENCH(PERF_COUNTERS) class LinuxPerformanceCounters; #endif } // namespace detail } // namespace nanobench } // namespace ankerl // definitions //////////////////////////////////////////////////////////////////////////////////// namespace ankerl { namespace nanobench { namespace detail { template <typename T> struct PerfCountSet { T pageFaults{}; T cpuCycles{}; T contextSwitches{}; T instructions{}; T branchInstructions{}; T branchMisses{}; }; } // namespace detail ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) struct Config { // actual benchmark config std::string mBenchmarkTitle = "benchmark"; // NOLINT(misc-non-private-member-variables-in-classes) std::string mBenchmarkName = "noname"; // NOLINT(misc-non-private-member-variables-in-classes) std::string mUnit = "op"; // NOLINT(misc-non-private-member-variables-in-classes) double mBatch = 1.0; // NOLINT(misc-non-private-member-variables-in-classes) double mComplexityN = -1.0; // NOLINT(misc-non-private-member-variables-in-classes) size_t mNumEpochs = 11; // NOLINT(misc-non-private-member-variables-in-classes) size_t mClockResolutionMultiple = static_cast<size_t>(1000); // NOLINT(misc-non-private-member-variables-in-classes) std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100); // NOLINT(misc-non-private-member-variables-in-classes) std::chrono::nanoseconds mMinEpochTime = std::chrono::milliseconds(1); // NOLINT(misc-non-private-member-variables-in-classes) uint64_t mMinEpochIterations{1}; // NOLINT(misc-non-private-member-variables-in-classes) // If not 0, run *exactly* this number of iterations per epoch. 
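// A value of 0 (the default) disables this, keeping the automatic sizing based on clock resolution and the epoch time limits.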
uint64_t mEpochIterations{0}; // NOLINT(misc-non-private-member-variables-in-classes) uint64_t mWarmup = 0; // NOLINT(misc-non-private-member-variables-in-classes) std::ostream* mOut = nullptr; // NOLINT(misc-non-private-member-variables-in-classes) std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1}; // NOLINT(misc-non-private-member-variables-in-classes) std::string mTimeUnitName = "ns"; // NOLINT(misc-non-private-member-variables-in-classes) bool mShowPerformanceCounters = true; // NOLINT(misc-non-private-member-variables-in-classes) bool mIsRelative = false; // NOLINT(misc-non-private-member-variables-in-classes) std::unordered_map<std::string, std::string> mContext{}; // NOLINT(misc-non-private-member-variables-in-classes) Config(); ~Config(); Config& operator=(Config const& other); Config& operator=(Config&& other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)); Config(Config const& other); Config(Config&& other) noexcept; }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) // Result returned after a benchmark has finished. Can be used as a baseline for relative(). ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) class Result { public: enum class Measure : size_t { elapsed, iterations, pagefaults, cpucycles, contextswitches, instructions, branchinstructions, branchmisses, _size }; explicit Result(Config benchmarkConfig); ~Result(); Result& operator=(Result const& other); Result& operator=(Result&& other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)); Result(Result const& other); Result(Result&& other) noexcept; // adds new measurement results // all values are scaled by iters (except iters...) void add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc); ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept; ANKERL_NANOBENCH(NODISCARD) double median(Measure m) const; ANKERL_NANOBENCH(NODISCARD) double medianAbsolutePercentError(Measure m) const; ANKERL_NANOBENCH(NODISCARD) double average(Measure m) const; ANKERL_NANOBENCH(NODISCARD) double sum(Measure m) const noexcept; ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept; ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept; ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept; ANKERL_NANOBENCH(NODISCARD) std::string const& context(char const* variableName) const; ANKERL_NANOBENCH(NODISCARD) std::string const& context(std::string const& variableName) const; ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept; ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const; ANKERL_NANOBENCH(NODISCARD) bool empty() const noexcept; ANKERL_NANOBENCH(NODISCARD) size_t size() const noexcept; // Finds string, if not found, returns _size. static Measure fromString(std::string const& str); private: Config mConfig{}; std::vector<std::vector<double>> mNameToMeasurements{}; }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) /** * An extremely fast random generator. Currently, this implements *RomuDuoJr*, developed by Mark Overton. Source: * http://www.romu-random.org/ * * RomuDuoJr is extremely fast and provides reasonable good randomness. Not enough for large jobs, but definitely * good enough for a benchmarking framework. * * * Estimated capacity: @f$ 2^{51} @f$ bytes * * Register pressure: 4 * * State size: 128 bits * * This random generator is a drop-in replacement for the generators supplied by ``<random>``. It is not * cryptographically secure. 
Its intended purpose is to be very fast so that benchmarks that make use * of randomness are not distorted too much by the random generator. * * Rng also provides a few non-standard helpers, optimized for speed. */ class Rng final { public: /** * @brief This RNG provides 64bit randomness. */ using result_type = uint64_t; static constexpr uint64_t(min)(); static constexpr uint64_t(max)(); /** * As a safety precaution, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the * same sequence, which is generally not what one wants. Instead create a new rng with the default constructor Rng(), which is * automatically seeded from `std::random_device`. If you really need a copy, use `copy()`. */ Rng(Rng const&) = delete; /** * Same as Rng(Rng const&), we don't allow assignment. If you need a new Rng create one with the default constructor Rng(). */ Rng& operator=(Rng const&) = delete; // moving is ok Rng(Rng&&) noexcept = default; Rng& operator=(Rng&&) noexcept = default; ~Rng() noexcept = default; /** * @brief Creates a new random generator with a random seed. * * Instead of a default seed (as the random generators from the standard library use), this properly seeds the random generator from * `std::random_device`. It guarantees correct seeding. Note that seeding can be relatively slow, depending on the source of * randomness used. So it is best to create a Rng once and use it for all your randomness purposes. */ Rng(); /*! Creates a new Rng that is seeded with a specific seed. Each Rng created from the same seed will produce the same randomness sequence. This can be useful for deterministic behavior. @verbatim embed:rst .. note:: The random algorithm might change between nanobench releases. Whenever a faster and/or better random generator becomes available, I will switch the implementation. @endverbatim As per the Romu paper, this seeds the Rng with the splitMix64 algorithm and performs 10 initial rounds to further mix up the internal state. @param seed The 64bit seed. All values are allowed, even 0. */ explicit Rng(uint64_t seed) noexcept; Rng(uint64_t x, uint64_t y) noexcept; explicit Rng(std::vector<uint64_t> const& data); /** * Creates a copy of the Rng, thus the copy provides exactly the same random sequence as the original. */ ANKERL_NANOBENCH(NODISCARD) Rng copy() const noexcept; /** * @brief Produces a 64bit random value. This should be very fast, thus it is marked as inline. In my benchmark, this is ~46 times * faster than `std::default_random_engine` for producing 64bit random values. It seems that the fastest std contender is * `std::mt19937_64`. Still, this RNG is 2-3 times as fast. * * @return uint64_t The next 64 bit random value. */ inline uint64_t operator()() noexcept; // This is slightly biased. See the notes and links in the documentation below. /** * Generates a random number between 0 and range (excluding range). * * The algorithm only produces 32bit numbers, and is slightly biased. The effect is quite small unless your range is close to the * maximum value of an integer. It is possible to correct the bias with rejection sampling (see * [here](https://lemire.me/blog/2016/06/30/fast-random-shuffling/)), but this is most likely irrelevant in practice for the * purposes of this Rng. * * See Daniel Lemire's blog post [A fast alternative to the modulo * reduction](https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/). * * @param range Upper exclusive range. E.g. a value of 3 will generate random numbers 0, 1, 2. 
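 *
 * A short usage sketch (the variable name `rng` is illustrative, not part of the API):
 *
 * ```
 * ankerl::nanobench::Rng rng;
 * auto idx = rng.bounded(10); // idx is in [0, 10(, computed with a multiply + shift instead of a modulo
 * ```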
* @return uint32_t Generated random values in range [0, range(. */ inline uint32_t bounded(uint32_t range) noexcept; // random double in range [0, 1( // see http://prng.di.unimi.it/ /** * Provides a random uniform double value between 0 and 1. This uses the method described in [Generating uniform doubles in the * unit interval](http://prng.di.unimi.it/), and is extremely fast. * * @return double Uniformly distributed double value in range [0,1(, excluding 1. */ inline double uniform01() noexcept; /** * Shuffles all entries in the given container. Although this has a slight bias due to the implementation of bounded(), this is * preferable to `std::shuffle` because it is over 5 times faster. See Daniel Lemire's blog post [Fast random * shuffling](https://lemire.me/blog/2016/06/30/fast-random-shuffling/). * * @param container The whole container will be shuffled. */ template <typename Container> void shuffle(Container& container) noexcept; /** * Extracts the full state of the generator, e.g. for serialization. For this RNG this is just 2 values, but to stay API compatible * with future implementations that potentially use more state, we use a vector. * * @return Vector containing the full state. */ ANKERL_NANOBENCH(NODISCARD) std::vector<uint64_t> state() const; private: static constexpr uint64_t rotl(uint64_t x, unsigned k) noexcept; uint64_t mX; uint64_t mY; }; /** * @brief Main entry point to nanobench's benchmarking facility. * * It holds configuration and results from one or more benchmark runs. Usually it is used in a single line, where the object is * constructed, configured, and then a benchmark is run. E.g. like this: * * ankerl::nanobench::Bench().unit("byte").batch(1000).run("random fluctuations", [&] { * // here be the benchmark code * }); * * In that example Bench() constructs the benchmark, it is then configured with unit() and batch(), and after configuration a * benchmark is executed with run(). Once run() has finished, it prints the result to `std::cout`. It would also store the results * in the Bench instance, but in this case the object is immediately destroyed so it's not available any more. */ ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) class Bench { public: /** * @brief Creates a new benchmark for configuration and running of benchmarks. */ Bench(); Bench(Bench&& other) noexcept; Bench& operator=(Bench&& other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)); Bench(Bench const& other); Bench& operator=(Bench const& other); ~Bench() noexcept; /*! @brief Repeatedly calls `op()` based on the configuration, and performs measurements. This call is marked with `noinline` to prevent the compiler from optimizing across different benchmarks. This can have quite a big effect on benchmark accuracy. @verbatim embed:rst .. note:: Each call to your lambda must have a side effect that the compiler can't possibly optimize away. E.g. add a result to an externally defined number (like `x` in the above example), and finally call `doNotOptimizeAway` on the variables the compiler must not remove. You can also use :cpp:func:`ankerl::nanobench::doNotOptimizeAway` directly in the lambda, but be aware that this has a small overhead. @endverbatim @tparam Op The code to benchmark. */ template <typename Op> ANKERL_NANOBENCH(NOINLINE) Bench& run(char const* benchmarkName, Op&& op); template <typename Op> ANKERL_NANOBENCH(NOINLINE) Bench& run(std::string const& benchmarkName, Op&& op); /** * @brief Same as run(char const* benchmarkName, Op op), but instead uses the previously set name. 
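 *
 * A minimal sketch of this overload (the variable `x` is illustrative):
 *
 * ```
 * uint64_t x = 1;
 * ankerl::nanobench::Bench().name("shift").run([&] {
 *     x <<= 1U;
 * }).doNotOptimizeAway(x);
 * ```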
* @tparam Op The code to benchmark. */ template <typename Op> ANKERL_NANOBENCH(NOINLINE) Bench& run(Op&& op); /** * @brief Title of the benchmark, will be shown in the table header. Changing the title will start a new markdown table. * * @param benchmarkTitle The title of the benchmark. */ Bench& title(char const* benchmarkTitle); Bench& title(std::string const& benchmarkTitle); /** * @brief Gets the title of the benchmark */ ANKERL_NANOBENCH(NODISCARD) std::string const& title() const noexcept; /// Name of the benchmark, will be shown in the table row. Bench& name(char const* benchmarkName); Bench& name(std::string const& benchmarkName); ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept; /** * @brief Set context information. * * The information can be accessed using custom render templates via `{{context(variableName)}}`. * Trying to render a variable that hasn't been set before raises an exception. * Not included in (default) markdown table. * * @see clearContext, render * * @param variableName The name of the context variable. * @param variableValue The value of the context variable. */ Bench& context(char const* variableName, char const* variableValue); Bench& context(std::string const& variableName, std::string const& variableValue); /** * @brief Reset context information. * * This may improve efficiency when using many context entries, * or improve robustness by removing spurious context entries. * * @see context */ Bench& clearContext(); /** * @brief Sets the batch size. * * E.g. number of processed byte, or some other metric for the size of the processed data in each iteration. If you benchmark * hashing of a 1000 byte long string and want byte/sec as a result, you can specify 1000 as the batch size. * * @tparam T Any input type is internally cast to `double`. * @param b batch size */ template <typename T> Bench& batch(T b) noexcept; ANKERL_NANOBENCH(NODISCARD) double batch() const noexcept; /** * @brief Sets the operation unit. * * Defaults to "op". Could be e.g. "byte" for string processing. This is used for the table header, e.g. to show `ns/byte`. Use * singular (*byte*, not *bytes*). A change clears the currently collected results. * * @param unit The unit name. */ Bench& unit(char const* unit); Bench& unit(std::string const& unit); ANKERL_NANOBENCH(NODISCARD) std::string const& unit() const noexcept; /** * @brief Sets the time unit to be used for the default output. * * Nanobench defaults to using ns (nanoseconds) as output in the markdown. For some benchmarks this is too coarse, so it is * possible to configure this. E.g. use `timeUnit(1ms, "ms")` to show `ms/op` instead of `ns/op`. * * @param tu Time unit to display the results in, default is 1ns. * @param tuName Name for the time unit, default is "ns" */ Bench& timeUnit(std::chrono::duration<double> const& tu, std::string const& tuName); ANKERL_NANOBENCH(NODISCARD) std::string const& timeUnitName() const noexcept; ANKERL_NANOBENCH(NODISCARD) std::chrono::duration<double> const& timeUnit() const noexcept; /** * @brief Set the output stream where the resulting markdown table will be printed to. * * The default is `&std::cout`. You can disable all output by setting `nullptr`. * * @param outstream Pointer to output stream, can be `nullptr`. */ Bench& output(std::ostream* outstream) noexcept; ANKERL_NANOBENCH(NODISCARD) std::ostream* output() const noexcept; /** * Modern processors have a very accurate clock, being able to measure as low as 20 nanoseconds. 
This is the main trick that allows nanobench to * be so fast: we find out how accurate the clock is, then run the benchmark only so often that the clock's accuracy is good enough * for accurate measurements. * * The default is to run one epoch for 1000 times the clock resolution. So for 20ns resolution and 11 epochs, this gives a total * runtime of * * @f[ * 20ns * 1000 * 11 \approx 0.2ms * @f] * * To be precise, nanobench adds 0-20% random noise to each evaluation. This prevents any aliasing effects, and further * improves accuracy. * * Total runtime will be higher though: Some initial time is needed to find out the target number of iterations for each epoch, and * there is some overhead involved to start & stop timers, calculate the resulting statistics, and write the output. * * @param multiple Target multiple of the clock resolution. Usually 1000 is a good compromise between runtime and accuracy. */ Bench& clockResolutionMultiple(size_t multiple) noexcept; ANKERL_NANOBENCH(NODISCARD) size_t clockResolutionMultiple() const noexcept; /** * @brief Controls the number of epochs, the number of measurements to perform. * * The reported result will be the median of the evaluations of all epochs. The higher you choose this, the more * deterministic the result will be, and outliers will be more easily removed. Also the `err%` will be more accurate the higher this * number is. Note that the `err%` will not necessarily decrease when the number of epochs is increased. But it will be a more accurate * representation of the benchmarked code's runtime stability. * * Choose the value wisely. In practice, 11 has been shown to be a reasonable compromise between runtime performance and accuracy. * This setting goes hand in hand with minEpochIterations() (or minEpochTime()). If you are more interested in *median* runtime, * you might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase * minEpochIterations() instead. * * @param numEpochs Number of epochs. */ Bench& epochs(size_t numEpochs) noexcept; ANKERL_NANOBENCH(NODISCARD) size_t epochs() const noexcept; /** * @brief Upper limit for the runtime of each epoch. * * As a safety precaution if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per * epoch. Default is 100ms. At least a single evaluation of the benchmark is performed. * * @see minEpochTime, minEpochIterations * * @param t Maximum target runtime for a single epoch. */ Bench& maxEpochTime(std::chrono::nanoseconds t) noexcept; ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds maxEpochTime() const noexcept; /** * @brief Minimum time each epoch should take. * * Default is 1ms (matching Config's mMinEpochTime), so together with clockResolutionMultiple() this bounds each epoch's runtime * from below. In most cases this is exactly what you want. If you see * that the evaluation is unreliable with a high `err%`, you can increase either minEpochTime() or minEpochIterations(). * * @see maxEpochTime, minEpochIterations * * @param t Minimum time each epoch should take. */ Bench& minEpochTime(std::chrono::nanoseconds t) noexcept; ANKERL_NANOBENCH(NODISCARD) std::chrono::nanoseconds minEpochTime() const noexcept; /** * @brief Sets the minimum number of iterations each epoch should take. * * Default is 1, and we rely on clockResolutionMultiple(). If the `err%` is high and you want a smoother result, you might want * to increase the minimum number of iterations, or increase the minEpochTime(). * * @see minEpochTime, maxEpochTime, minEpochIterations * * @param numIters Minimum number of iterations per epoch. 
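 *
 * For example (the number is illustrative), to smooth out a fast but noisy benchmark:
 *
 * ```
 * ankerl::nanobench::Bench().minEpochIterations(5000).run("noisy op", [&] {
 *     // here be the benchmark code
 * });
 * ```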
*/ Bench& minEpochIterations(uint64_t numIters) noexcept; ANKERL_NANOBENCH(NODISCARD) uint64_t minEpochIterations() const noexcept; /** * Sets exactly the number of iterations for each epoch. Ignores all other epoch limits. This forces nanobench to use exactly * the given number of iterations for each epoch, not more and not less. Default is 0 (disabled). * * @param numIters Exact number of iterations to use. Set to 0 to disable. */ Bench& epochIterations(uint64_t numIters) noexcept; ANKERL_NANOBENCH(NODISCARD) uint64_t epochIterations() const noexcept; /** * @brief Sets a number of iterations that are initially performed without any measurements. * * Some benchmarks need a few evaluations to warm up caches / database / whatever access. Normally this should not be needed, since * we show the median result so initial outliers will be filtered away automatically. If the warmup effect is large though, you * might want to set it. Default is 0. * * @param numWarmupIters Number of warmup iterations. */ Bench& warmup(uint64_t numWarmupIters) noexcept; ANKERL_NANOBENCH(NODISCARD) uint64_t warmup() const noexcept; /** * @brief Marks the next run as the baseline. * * Call `relative(true)` to mark the run as the baseline. Successive runs will be compared to this run. It is calculated by * * @f[ * 100\% * \frac{baseline}{runtime} * @f] * * * 100% means it is exactly as fast as the baseline * * >100% means it is faster than the baseline. E.g. 200% means the current run is twice as fast as the baseline. * * <100% means it is slower than the baseline. E.g. 50% means it is twice as slow as the baseline. * * See the tutorial section "Comparing Results" for example usage. * * @param isRelativeEnabled True to enable processing */ Bench& relative(bool isRelativeEnabled) noexcept; ANKERL_NANOBENCH(NODISCARD) bool relative() const noexcept; /** * @brief Enables/disables performance counters. * * On Linux nanobench has a powerful feature to use performance counters. This enables counting of retired instructions, count * number of branches, missed branches, etc. On default this is enabled, but you can disable it if you don't need that feature. * * @param showPerformanceCounters True to enable, false to disable. */ Bench& performanceCounters(bool showPerformanceCounters) noexcept; ANKERL_NANOBENCH(NODISCARD) bool performanceCounters() const noexcept; /** * @brief Retrieves all benchmark results collected by the bench object so far. * * Each call to run() generates a Result that is stored within the Bench instance. This is mostly for advanced users who want to * see all the nitty gritty details. * * @return All results collected so far. */ ANKERL_NANOBENCH(NODISCARD) std::vector<Result> const& results() const noexcept; /*! @verbatim embed:rst Convenience shortcut to :cpp:func:`ankerl::nanobench::doNotOptimizeAway`. @endverbatim */ template <typename Arg> Bench& doNotOptimizeAway(Arg&& arg); /*! @verbatim embed:rst Sets N for asymptotic complexity calculation, so it becomes possible to calculate `Big O <https://en.wikipedia.org/wiki/Big_O_notation>`_ from multiple benchmark evaluations. Use :cpp:func:`ankerl::nanobench::Bench::complexityBigO` when the evaluation has finished. See the tutorial :ref:`asymptotic-complexity` for details. @endverbatim @tparam T Any type is cast to `double`. @param n Length of N for the next benchmark run, so it is possible to calculate `bigO`. */ template <typename T> Bench& complexityN(T n) noexcept; ANKERL_NANOBENCH(NODISCARD) double complexityN() const noexcept; /*! 
Calculates [Big O](https://en.wikipedia.org/wiki/Big_O_notation) of the results with all preconfigured complexity functions. Currently these complexity functions are fitted into the benchmark results: @f$ \mathcal{O}(1) @f$, @f$ \mathcal{O}(n) @f$, @f$ \mathcal{O}(\log{}n) @f$, @f$ \mathcal{O}(n\log{}n) @f$, @f$ \mathcal{O}(n^2) @f$, @f$ \mathcal{O}(n^3) @f$. If we e.g. evaluate the complexity of `std::sort`, this is the result of `std::cout << bench.complexityBigO()`: ``` | coefficient | err% | complexity |--------------:|-------:|------------ | 5.08935e-09 | 2.6% | O(n log n) | 6.10608e-08 | 8.0% | O(n) | 1.29307e-11 | 47.2% | O(n^2) | 2.48677e-15 | 69.6% | O(n^3) | 9.88133e-06 | 132.3% | O(log n) | 5.98793e-05 | 162.5% | O(1) ``` So in this case @f$ \mathcal{O}(n\log{}n) @f$ provides the best approximation. @verbatim embed:rst See the tutorial :ref:`asymptotic-complexity` for details. @endverbatim @return Evaluation results, which can be printed or otherwise inspected. */ std::vector<BigO> complexityBigO() const; /** * @brief Calculates bigO for a custom function. * * E.g. to calculate the mean squared error for @f$ \mathcal{O}(\log{}\log{}n) @f$, which is not part of the default set of * complexityBigO(), you can do this: * * ``` * auto logLogN = bench.complexityBigO("O(log log n)", [](double n) { * return std::log2(std::log2(n)); * }); * ``` * * The resulting mean squared error can be printed with `std::cout << logLogN`. E.g. it prints something like this: * * ```text * 2.46985e-05 * O(log log n), rms=1.48121 * ``` * * @tparam Op Type of mapping operation. * @param name Name for the function, e.g. "O(log log n)". * @param op Op's operator() maps a `double` with the desired complexity function, e.g. `log2(log2(n))`. * @return BigO Error calculation, which is streamable to std::cout. */ template <typename Op> BigO complexityBigO(char const* name, Op op) const; template <typename Op> BigO complexityBigO(std::string const& name, Op op) const; /*! @verbatim embed:rst Convenience shortcut to :cpp:func:`ankerl::nanobench::render`. @endverbatim */ Bench& render(char const* templateContent, std::ostream& os); Bench& render(std::string const& templateContent, std::ostream& os); Bench& config(Config const& benchmarkConfig); ANKERL_NANOBENCH(NODISCARD) Config const& config() const noexcept; private: Config mConfig{}; std::vector<Result> mResults{}; }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) /** * @brief Makes sure none of the given arguments are optimized away by the compiler. * * @tparam Arg Type of the argument that shouldn't be optimized away. * @param arg The input that we mark as being used, even though we don't do anything with it. */ template <typename Arg> void doNotOptimizeAway(Arg&& arg); namespace detail { #if defined(_MSC_VER) void doNotOptimizeAwaySink(void const*); template <typename T> void doNotOptimizeAway(T const& val); #else // This assembly magic is taken directly from what Google Benchmark does. I have previously used what Facebook's folly does, but // that seemed to have compilation problems in some cases. Google Benchmark's approach seemed to be the best tested anyway. 
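// A usage sketch (the variable `x` is illustrative): without such a barrier, the optimizer may remove
// the benchmarked computation as dead code:
//
//     uint64_t x = 0;
//     ankerl::nanobench::Bench().run("add", [&] { x += 1; ankerl::nanobench::doNotOptimizeAway(x); });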
// see https://github.com/google/benchmark/blob/v1.7.1/include/benchmark/benchmark.h#L443-L446 template <typename T> void doNotOptimizeAway(T const& val) { // NOLINTNEXTLINE(hicpp-no-assembler) asm volatile("" : : "r,m"(val) : "memory"); } template <typename T> void doNotOptimizeAway(T& val) { # if defined(__clang__) // NOLINTNEXTLINE(hicpp-no-assembler) asm volatile("" : "+r,m"(val) : : "memory"); # else // NOLINTNEXTLINE(hicpp-no-assembler) asm volatile("" : "+m,r"(val) : : "memory"); # endif } #endif // internally used, but visible because run() is templated. // Not movable/copy-able, so we simply use a pointer instead of unique_ptr. This saves us from // having to include <memory>, and the template instantiation overhead of unique_ptr which is unfortunately quite significant. ANKERL_NANOBENCH(IGNORE_EFFCPP_PUSH) class IterationLogic { public: explicit IterationLogic(Bench const& bench); IterationLogic(IterationLogic&&) = delete; IterationLogic& operator=(IterationLogic&&) = delete; IterationLogic(IterationLogic const&) = delete; IterationLogic& operator=(IterationLogic const&) = delete; ~IterationLogic(); ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept; void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept; void moveResultTo(std::vector<Result>& results) noexcept; private: struct Impl; Impl* mPimpl; }; ANKERL_NANOBENCH(IGNORE_EFFCPP_POP) ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) class PerformanceCounters { public: PerformanceCounters(PerformanceCounters const&) = delete; PerformanceCounters(PerformanceCounters&&) = delete; PerformanceCounters& operator=(PerformanceCounters const&) = delete; PerformanceCounters& operator=(PerformanceCounters&&) = delete; PerformanceCounters(); ~PerformanceCounters(); void beginMeasure(); void endMeasure(); void updateResults(uint64_t numIters); ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& val() const noexcept; ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& has() const noexcept; private: #if ANKERL_NANOBENCH(PERF_COUNTERS) LinuxPerformanceCounters* mPc = nullptr; #endif PerfCountSet<uint64_t> mVal{}; PerfCountSet<bool> mHas{}; }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) // Gets the singleton PerformanceCounters& performanceCounters(); } // namespace detail class BigO { public: using RangeMeasure = std::vector<std::pair<double, double>>; template <typename Op> static RangeMeasure mapRangeMeasure(RangeMeasure data, Op op) { for (auto& rangeMeasure : data) { rangeMeasure.first = op(rangeMeasure.first); } return data; } static RangeMeasure collectRangeMeasure(std::vector<Result> const& results); template <typename Op> BigO(char const* bigOName, RangeMeasure const& rangeMeasure, Op rangeToN) : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {} template <typename Op> BigO(std::string bigOName, RangeMeasure const& rangeMeasure, Op rangeToN) : BigO(std::move(bigOName), mapRangeMeasure(rangeMeasure, rangeToN)) {} BigO(char const* bigOName, RangeMeasure const& scaledRangeMeasure); BigO(std::string bigOName, RangeMeasure const& scaledRangeMeasure); ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept; ANKERL_NANOBENCH(NODISCARD) double constant() const noexcept; ANKERL_NANOBENCH(NODISCARD) double normalizedRootMeanSquare() const noexcept; ANKERL_NANOBENCH(NODISCARD) bool operator<(BigO const& other) const noexcept; private: std::string mName{}; double mConstant{}; double mNormalizedRootMeanSquare{}; }; std::ostream& operator<<(std::ostream& os, BigO const& bigO); std::ostream& 
operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs); } // namespace nanobench } // namespace ankerl // implementation ///////////////////////////////////////////////////////////////////////////////// namespace ankerl { namespace nanobench { constexpr uint64_t(Rng::min)() { return 0; } constexpr uint64_t(Rng::max)() { return (std::numeric_limits<uint64_t>::max)(); } ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") uint64_t Rng::operator()() noexcept { auto x = mX; mX = UINT64_C(15241094284759029579) * mY; mY = rotl(mY - x, 27); return x; } ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") uint32_t Rng::bounded(uint32_t range) noexcept { uint64_t const r32 = static_cast<uint32_t>(operator()()); auto multiresult = r32 * range; return static_cast<uint32_t>(multiresult >> 32U); } double Rng::uniform01() noexcept { auto i = (UINT64_C(0x3ff) << 52U) | (operator()() >> 12U); // can't use union in c++ here for type punning, it's undefined behavior. // std::memcpy is optimized anyway. double d{}; std::memcpy(&d, &i, sizeof(double)); return d - 1.0; } template <typename Container> void Rng::shuffle(Container& container) noexcept { auto i = container.size(); while (i > 1U) { using std::swap; auto n = operator()(); // using decltype(i) instead of size_t to be compatible with containers with 32bit index (see #80) auto b1 = static_cast<decltype(i)>((static_cast<uint32_t>(n) * static_cast<uint64_t>(i)) >> 32U); swap(container[--i], container[b1]); auto b2 = static_cast<decltype(i)>(((n >> 32U) * static_cast<uint64_t>(i)) >> 32U); swap(container[--i], container[b2]); } } ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") constexpr uint64_t Rng::rotl(uint64_t x, unsigned k) noexcept { return (x << k) | (x >> (64U - k)); } template <typename Op> ANKERL_NANOBENCH_NO_SANITIZE("integer") Bench& Bench::run(Op&& op) { // It is important that this method is kept short so the compiler can do better optimizations / inlining of op() detail::IterationLogic iterationLogic(*this); auto& pc = detail::performanceCounters(); while (auto n = iterationLogic.numIters()) { pc.beginMeasure(); Clock::time_point const before = Clock::now(); while (n-- > 0) { op(); } Clock::time_point const after = Clock::now(); pc.endMeasure(); pc.updateResults(iterationLogic.numIters()); iterationLogic.add(after - before, pc); } iterationLogic.moveResultTo(mResults); return *this; } // Performs all evaluations. template <typename Op> Bench& Bench::run(char const* benchmarkName, Op&& op) { name(benchmarkName); return run(std::forward<Op>(op)); } template <typename Op> Bench& Bench::run(std::string const& benchmarkName, Op&& op) { name(benchmarkName); return run(std::forward<Op>(op)); } template <typename Op> BigO Bench::complexityBigO(char const* benchmarkName, Op op) const { return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op); } template <typename Op> BigO Bench::complexityBigO(std::string const& benchmarkName, Op op) const { return BigO(benchmarkName, BigO::collectRangeMeasure(mResults), op); } // Set the batch size, e.g. number of processed bytes, or some other metric for the size of the processed data in each iteration. // Any argument is cast to double. template <typename T> Bench& Bench::batch(T b) noexcept { mConfig.mBatch = static_cast<double>(b); return *this; } // Sets the computation complexity of the next run. Any argument is cast to double. 
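// A sketch of typical use (the container `v` and the sizes are illustrative): set complexityN once per
// input size before each run, then evaluate all runs together:
//
//     ankerl::nanobench::Bench bench;
//     for (size_t n : {1000U, 10000U, 100000U}) {
//         std::vector<int> v(n);
//         bench.complexityN(n).run("iota", [&] { std::iota(v.begin(), v.end(), 0); });
//     }
//     std::cout << bench.complexityBigO();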
template <typename T> Bench& Bench::complexityN(T n) noexcept { mConfig.mComplexityN = static_cast<double>(n); return *this; } // Convenience: makes sure none of the given arguments are optimized away by the compiler. template <typename Arg> Bench& Bench::doNotOptimizeAway(Arg&& arg) { detail::doNotOptimizeAway(std::forward<Arg>(arg)); return *this; } // Makes sure none of the given arguments are optimized away by the compiler. template <typename Arg> void doNotOptimizeAway(Arg&& arg) { detail::doNotOptimizeAway(std::forward<Arg>(arg)); } namespace detail { #if defined(_MSC_VER) template <typename T> void doNotOptimizeAway(T const& val) { doNotOptimizeAwaySink(&val); } #endif } // namespace detail } // namespace nanobench } // namespace ankerl #if defined(ANKERL_NANOBENCH_IMPLEMENT) /////////////////////////////////////////////////////////////////////////////////////////////////// // implementation part - only visible in .cpp /////////////////////////////////////////////////////////////////////////////////////////////////// # include <algorithm> // sort, reverse # include <atomic> // compare_exchange_strong in loop overhead # include <cstdlib> // getenv # include <cstring> // strstr, strncmp # include <fstream> // ifstream to parse proc files # include <iomanip> // setw, setprecision # include <iostream> // cout # include <numeric> // accumulate # include <random> // random_device # include <sstream> // to_s in Number # include <stdexcept> // throw for rendering templates # include <tuple> // std::tie # if defined(__linux__) # include <unistd.h> //sysconf # endif # if ANKERL_NANOBENCH(PERF_COUNTERS) # include <map> // map # include <linux/perf_event.h> # include <sys/ioctl.h> # include <sys/syscall.h> # endif // declarations /////////////////////////////////////////////////////////////////////////////////// namespace ankerl { namespace nanobench { // helper stuff that is only intended to be used internally namespace detail { struct TableInfo; // formatting utilities namespace fmt { class NumSep; class StreamStateRestorer; class Number; class MarkDownColumn; class MarkDownCode; } // namespace fmt } // namespace detail } // namespace nanobench } // namespace ankerl // definitions //////////////////////////////////////////////////////////////////////////////////// namespace ankerl { namespace nanobench { uint64_t splitMix64(uint64_t& state) noexcept; namespace detail { // helpers to get double values template <typename T> inline double d(T t) noexcept { return static_cast<double>(t); } inline double d(Clock::duration duration) noexcept { return std::chrono::duration_cast<std::chrono::duration<double>>(duration).count(); } // Calculates clock resolution once, and remembers the result inline Clock::duration clockResolution() noexcept; } // namespace detail namespace templates { char const* csv() noexcept { return R"DELIM("title";"name";"unit";"batch";"elapsed";"error %";"instructions";"branches";"branch misses";"total" {{#result}}"{{title}}";"{{name}}";"{{unit}}";{{batch}};{{median(elapsed)}};{{medianAbsolutePercentError(elapsed)}};{{median(instructions)}};{{median(branchinstructions)}};{{median(branchmisses)}};{{sumProduct(iterations, elapsed)}} {{/result}})DELIM"; } char const* htmlBoxplot() noexcept { return R"DELIM(<html> <head> <script src="https://cdn.plot.ly/plotly-latest.min.js"></script> </head> <body> <div id="myDiv"></div> <script> var data = [ {{#result}}{ name: '{{name}}', y: [{{#measurement}}{{elapsed}}{{^-last}}, {{/-last}}{{/measurement}}], }, {{/result}} ]; var title = '{{title}}'; data = data.map(a => Object.assign(a, { boxpoints: 'all', pointpos: 0, type: 'box' })); var layout = { title: { text: title }, showlegend: false, yaxis: { title: 'time per unit', rangemode: 'tozero', autorange: true } }; Plotly.newPlot('myDiv', data, layout, {responsive: true}); </script> </body> </html>)DELIM"; } char const* pyperf() noexcept { return R"DELIM({ "benchmarks": [ { "runs": [ { "values": [ {{#measurement}} {{elapsed}}{{^-last}}, {{/-last}}{{/measurement}} ] } ] } ], "metadata": { "loops": {{sum(iterations)}}, "inner_loops": {{batch}}, "name": "{{title}}", "unit": "second" }, "version": "1.0" })DELIM"; } char const* json() noexcept { return R"DELIM({ "results": [ {{#result}} { "title": "{{title}}", "name": "{{name}}", "unit": "{{unit}}", "batch": {{batch}}, "complexityN": {{complexityN}}, "epochs": {{epochs}}, "clockResolution": {{clockResolution}}, "clockResolutionMultiple": {{clockResolutionMultiple}}, "maxEpochTime": {{maxEpochTime}}, "minEpochTime": {{minEpochTime}}, "minEpochIterations": {{minEpochIterations}}, "epochIterations": {{epochIterations}}, "warmup": {{warmup}}, "relative": {{relative}}, "median(elapsed)": {{median(elapsed)}}, "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}}, "median(instructions)": {{median(instructions)}}, "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}}, "median(cpucycles)": {{median(cpucycles)}}, "median(contextswitches)": {{median(contextswitches)}}, "median(pagefaults)": {{median(pagefaults)}}, "median(branchinstructions)": {{median(branchinstructions)}}, "median(branchmisses)": {{median(branchmisses)}}, "totalTime": {{sumProduct(iterations, elapsed)}}, "measurements": [ {{#measurement}} { "iterations": {{iterations}}, "elapsed": {{elapsed}}, "pagefaults": {{pagefaults}}, "cpucycles": {{cpucycles}}, "contextswitches": {{contextswitches}}, "instructions": {{instructions}}, "branchinstructions": {{branchinstructions}}, "branchmisses": {{branchmisses}} }{{^-last}},{{/-last}} {{/measurement}} ] }{{^-last}},{{/-last}} {{/result}} ] })DELIM"; } ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) struct Node { enum class Type { tag, content, section, inverted_section }; char const* begin; char const* end; std::vector<Node> children; Type type; template <size_t N> // NOLINTNEXTLINE(hicpp-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) bool operator==(char const (&str)[N]) const noexcept { // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1); } }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) // NOLINTNEXTLINE(misc-no-recursion) static std::vector<Node> parseMustacheTemplate(char const** tpl) { std::vector<Node> nodes; while (true) { auto const* begin = std::strstr(*tpl, "{{"); auto const* end = begin; if (begin != nullptr) { // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) begin += 2; end = std::strstr(begin, "}}"); } if (begin == nullptr || end == nullptr) { // nothing found, finish node // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content}); return nodes; } // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content}); // we found a tag // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) *tpl = end + 2; switch (*begin) { 
case '/': // finished! bail out return nodes; case '#': // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section}); break; case '^': // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section}); break; default: nodes.emplace_back(Node{begin, end, std::vector<Node>{}, Node::Type::tag}); break; } } } static bool generateFirstLast(Node const& n, size_t idx, size_t size, std::ostream& out) { ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type)); bool const matchFirst = n == "-first"; bool const matchLast = n == "-last"; if (!matchFirst && !matchLast) { return false; } bool doWrite = false; if (n.type == Node::Type::section) { doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1); } else if (n.type == Node::Type::inverted_section) { doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1); } if (doWrite) { for (auto const& child : n.children) { if (child.type == Node::Type::content) { out.write(child.begin, std::distance(child.begin, child.end)); } } } return true; } static bool matchCmdArgs(std::string const& str, std::vector<std::string>& matchResult) { matchResult.clear(); auto idxOpen = str.find('('); auto idxClose = str.find(')', idxOpen); if (idxClose == std::string::npos) { return false; } matchResult.emplace_back(str.substr(0, idxOpen)); // split by comma matchResult.emplace_back(); for (size_t i = idxOpen + 1; i != idxClose; ++i) { if (str[i] == ' ' || str[i] == '\t') { // skip whitespace continue; } if (str[i] == ',') { // got a comma => new string matchResult.emplace_back(); continue; } // no whitespace no comma, append matchResult.back() += str[i]; } return true; } static bool generateConfigTag(Node const& n, Config const& config, std::ostream& out) { using detail::d; if (n == "title") { out << config.mBenchmarkTitle; return true; } if (n == "name") { out << config.mBenchmarkName; return true; } if (n == "unit") { out << config.mUnit; return true; } if (n == "batch") { out << config.mBatch; return true; } if (n == "complexityN") { out << config.mComplexityN; return true; } if (n == "epochs") { out << config.mNumEpochs; return true; } if (n == "clockResolution") { out << d(detail::clockResolution()); return true; } if (n == "clockResolutionMultiple") { out << config.mClockResolutionMultiple; return true; } if (n == "maxEpochTime") { out << d(config.mMaxEpochTime); return true; } if (n == "minEpochTime") { out << d(config.mMinEpochTime); return true; } if (n == "minEpochIterations") { out << config.mMinEpochIterations; return true; } if (n == "epochIterations") { out << config.mEpochIterations; return true; } if (n == "warmup") { out << config.mWarmup; return true; } if (n == "relative") { out << config.mIsRelative; return true; } return false; } // NOLINTNEXTLINE(readability-function-cognitive-complexity) static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostream& out) { if (generateConfigTag(n, r.config(), out)) { return out; } // match e.g. 
"median(elapsed)" // g++ 4.8 doesn't implement std::regex :( // static std::regex const regOpArg1("^([a-zA-Z]+)\\(([a-zA-Z]*)\\)$"); // std::cmatch matchResult; // if (std::regex_match(n.begin, n.end, matchResult, regOpArg1)) { std::vector<std::string> matchResult; if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) { if (matchResult.size() == 2) { if (matchResult[0] == "context") { return out << r.context(matchResult[1]); } auto m = Result::fromString(matchResult[1]); if (m == Result::Measure::_size) { return out << 0.0; } if (matchResult[0] == "median") { return out << r.median(m); } if (matchResult[0] == "average") { return out << r.average(m); } if (matchResult[0] == "medianAbsolutePercentError") { return out << r.medianAbsolutePercentError(m); } if (matchResult[0] == "sum") { return out << r.sum(m); } if (matchResult[0] == "minimum") { return out << r.minimum(m); } if (matchResult[0] == "maximum") { return out << r.maximum(m); } } else if (matchResult.size() == 3) { auto m1 = Result::fromString(matchResult[1]); auto m2 = Result::fromString(matchResult[2]); if (m1 == Result::Measure::_size || m2 == Result::Measure::_size) { return out << 0.0; } if (matchResult[0] == "sumProduct") { return out << r.sumProduct(m1, m2); } } } // match e.g. "sumProduct(elapsed, iterations)" // static std::regex const regOpArg2("^([a-zA-Z]+)\\(([a-zA-Z]*)\\s*,\\s+([a-zA-Z]*)\\)$"); // nothing matches :( throw std::runtime_error("command '" + std::string(n.begin, n.end) + "' not understood"); } static void generateResultMeasurement(std::vector<Node> const& nodes, size_t idx, Result const& r, std::ostream& out) { for (auto const& n : nodes) { if (!generateFirstLast(n, idx, r.size(), out)) { ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type)); switch (n.type) { case Node::Type::content: out.write(n.begin, std::distance(n.begin, n.end)); break; case Node::Type::inverted_section: throw std::runtime_error("got a inverted section inside measurement"); case Node::Type::section: throw std::runtime_error("got a section inside measurement"); case Node::Type::tag: { auto m = Result::fromString(std::string(n.begin, n.end)); if (m == Result::Measure::_size || !r.has(m)) { out << 0.0; } else { out << r.get(idx, m); } break; } } } } } static void generateResult(std::vector<Node> const& nodes, size_t idx, std::vector<Result> const& results, std::ostream& out) { auto const& r = results[idx]; for (auto const& n : nodes) { if (!generateFirstLast(n, idx, results.size(), out)) { ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type)); switch (n.type) { case Node::Type::content: out.write(n.begin, std::distance(n.begin, n.end)); break; case Node::Type::inverted_section: throw std::runtime_error("got a inverted section inside result"); case Node::Type::section: if (n == "measurement") { for (size_t i = 0; i < r.size(); ++i) { generateResultMeasurement(n.children, i, r, out); } } else { throw std::runtime_error("got a section inside result"); } break; case Node::Type::tag: generateResultTag(n, r, out); break; } } } } } // namespace templates // helper stuff that only intended to be used internally namespace detail { char const* getEnv(char const* name); bool isEndlessRunning(std::string const& name); bool isWarningsEnabled(); template <typename T> T parseFile(std::string const& filename, bool* fail); void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations); void printStabilityInformationOnce(std::ostream* outStream); // remembers the last table settings used. 
When it changes, a new table header is automatically written for the new entry. uint64_t& singletonHeaderHash() noexcept; // determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference. Clock::duration calcClockResolution(size_t numEvaluations) noexcept; // formatting utilities namespace fmt { // adds thousands separator to numbers ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) class NumSep : public std::numpunct<char> { public: explicit NumSep(char sep); char do_thousands_sep() const override; std::string do_grouping() const override; private: char mSep; }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) // RAII to save & restore a stream's state ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) class StreamStateRestorer { public: explicit StreamStateRestorer(std::ostream& s); ~StreamStateRestorer(); // sets back all stream info that we remembered at construction void restore(); // don't allow copying / moving StreamStateRestorer(StreamStateRestorer const&) = delete; StreamStateRestorer& operator=(StreamStateRestorer const&) = delete; StreamStateRestorer(StreamStateRestorer&&) = delete; StreamStateRestorer& operator=(StreamStateRestorer&&) = delete; private: std::ostream& mStream; std::locale mLocale; std::streamsize const mPrecision; std::streamsize const mWidth; std::ostream::char_type const mFill; std::ostream::fmtflags const mFmtFlags; }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) // Number formatter class Number { public: Number(int width, int precision, double value); Number(int width, int precision, int64_t value); ANKERL_NANOBENCH(NODISCARD) std::string to_s() const; private: friend std::ostream& operator<<(std::ostream& os, Number const& n); std::ostream& write(std::ostream& os) const; int mWidth; int mPrecision; double mValue; }; // helper replacement for std::to_string of signed/unsigned numbers so we are locale independent std::string to_s(uint64_t n); std::ostream& operator<<(std::ostream& os, Number const& n); class MarkDownColumn { public: MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val) noexcept; ANKERL_NANOBENCH(NODISCARD) std::string title() const; ANKERL_NANOBENCH(NODISCARD) std::string separator() const; ANKERL_NANOBENCH(NODISCARD) std::string invalid() const; ANKERL_NANOBENCH(NODISCARD) std::string value() const; private: int mWidth; int mPrecision; std::string mTitle; std::string mSuffix; double mValue; }; // Formats any text as markdown code, escaping backticks. 
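// e.g. MarkDownCode("a`b") streams as `a``b`: the text is wrapped in backticks, and any embedded
// backtick is doubled so the surrounding markdown stays valid.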
class MarkDownCode { public: explicit MarkDownCode(std::string const& what); private: friend std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode); std::ostream& write(std::ostream& os) const; std::string mWhat{}; }; std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode); } // namespace fmt } // namespace detail } // namespace nanobench } // namespace ankerl // implementation ///////////////////////////////////////////////////////////////////////////////// namespace ankerl { namespace nanobench { // NOLINTNEXTLINE(readability-function-cognitive-complexity) void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out) { detail::fmt::StreamStateRestorer const restorer(out); out.precision(std::numeric_limits<double>::digits10); auto nodes = templates::parseMustacheTemplate(&mustacheTemplate); for (auto const& n : nodes) { ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type)); switch (n.type) { case templates::Node::Type::content: out.write(n.begin, std::distance(n.begin, n.end)); break; case templates::Node::Type::inverted_section: throw std::runtime_error("unknown list '" + std::string(n.begin, n.end) + "'"); case templates::Node::Type::section: if (n == "result") { const size_t nbResults = results.size(); for (size_t i = 0; i < nbResults; ++i) { generateResult(n.children, i, results, out); } } else if (n == "measurement") { if (results.size() != 1) { throw std::runtime_error( "render: can only use section 'measurement' here if there is a single result, but there are " + detail::fmt::to_s(results.size())); } // when we only have a single result, we can immediately go into its measurement. auto const& r = results.front(); for (size_t i = 0; i < r.size(); ++i) { generateResultMeasurement(n.children, i, r, out); } } else { throw std::runtime_error("render: unknown section '" + std::string(n.begin, n.end) + "'"); } break; case templates::Node::Type::tag: if (results.size() == 1) { // result & config are both supported there generateResultTag(n, results.front(), out); } else { // This just uses the last result's config. 
if (!generateConfigTag(n, results.back().config(), out)) { throw std::runtime_error("unknown tag '" + std::string(n.begin, n.end) + "'"); } } break; } } } void render(std::string const& mustacheTemplate, std::vector<Result> const& results, std::ostream& out) { render(mustacheTemplate.c_str(), results, out); } void render(char const* mustacheTemplate, const Bench& bench, std::ostream& out) { render(mustacheTemplate, bench.results(), out); } void render(std::string const& mustacheTemplate, const Bench& bench, std::ostream& out) { render(mustacheTemplate.c_str(), bench.results(), out); } namespace detail { PerformanceCounters& performanceCounters() { # if defined(__clang__) # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wexit-time-destructors" # endif static PerformanceCounters pc; # if defined(__clang__) # pragma clang diagnostic pop # endif return pc; } // Windows version of doNotOptimizeAway // see https://github.com/google/benchmark/blob/v1.7.1/include/benchmark/benchmark.h#L514 // see https://github.com/facebook/folly/blob/v2023.01.30.00/folly/lang/Hint-inl.h#L54-L58 // see https://learn.microsoft.com/en-us/cpp/preprocessor/optimize # if defined(_MSC_VER) # pragma optimize("", off) void doNotOptimizeAwaySink(void const*) {} # pragma optimize("", on) # endif template <typename T> T parseFile(std::string const& filename, bool* fail) { std::ifstream fin(filename); // NOLINT(misc-const-correctness) T num{}; fin >> num; if (fail != nullptr) { *fail = fin.fail(); } return num; } char const* getEnv(char const* name) { # if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable : 4996) // getenv': This function or variable may be unsafe. # endif return std::getenv(name); // NOLINT(concurrency-mt-unsafe) # if defined(_MSC_VER) # pragma warning(pop) # endif } bool isEndlessRunning(std::string const& name) { auto const* const endless = getEnv("NANOBENCH_ENDLESS"); return nullptr != endless && endless == name; } // True when environment variable NANOBENCH_SUPPRESS_WARNINGS is either not set at all, or set to "0" bool isWarningsEnabled() { auto const* const suppression = getEnv("NANOBENCH_SUPPRESS_WARNINGS"); return nullptr == suppression || suppression == std::string("0"); } void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations) { warnings.clear(); recommendations.clear(); # if defined(DEBUG) warnings.emplace_back("DEBUG defined"); bool const recommendCheckFlags = true; # else bool const recommendCheckFlags = false; # endif bool recommendPyPerf = false; # if defined(__linux__) auto nprocs = sysconf(_SC_NPROCESSORS_CONF); if (nprocs <= 0) { warnings.emplace_back("couldn't figure out number of processors - no governor, turbo check possible"); } else { // check frequency scaling for (long id = 0; id < nprocs; ++id) { auto idStr = detail::fmt::to_s(static_cast<uint64_t>(id)); auto sysCpu = "/sys/devices/system/cpu/cpu" + idStr; auto minFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_min_freq", nullptr); auto maxFreq = parseFile<int64_t>(sysCpu + "/cpufreq/scaling_max_freq", nullptr); if (minFreq != maxFreq) { auto minMHz = d(minFreq) / 1000.0; auto maxMHz = d(maxFreq) / 1000.0; warnings.emplace_back("CPU frequency scaling enabled: CPU " + idStr + " between " + detail::fmt::Number(1, 1, minMHz).to_s() + " and " + detail::fmt::Number(1, 1, maxMHz).to_s() + " MHz"); recommendPyPerf = true; break; } } auto fail = false; auto currentGovernor = 
parseFile<std::string>("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", &fail); if (!fail && "performance" != currentGovernor) { warnings.emplace_back("CPU governor is '" + currentGovernor + "' but should be 'performance'"); recommendPyPerf = true; } auto noTurbo = parseFile<int>("/sys/devices/system/cpu/intel_pstate/no_turbo", &fail); if (!fail && noTurbo == 0) { warnings.emplace_back("Turbo is enabled, CPU frequency will fluctuate"); recommendPyPerf = true; } } # endif if (recommendCheckFlags) { recommendations.emplace_back("Make sure you compile for Release"); } if (recommendPyPerf) { recommendations.emplace_back("Use 'pyperf system tune' before benchmarking. See https://github.com/psf/pyperf"); } } void printStabilityInformationOnce(std::ostream* outStream) { static bool shouldPrint = true; if (shouldPrint && (nullptr != outStream) && isWarningsEnabled()) { auto& os = *outStream; shouldPrint = false; std::vector<std::string> warnings; std::vector<std::string> recommendations; gatherStabilityInformation(warnings, recommendations); if (warnings.empty()) { return; } os << "Warning, results might be unstable:" << std::endl; for (auto const& w : warnings) { os << "* " << w << std::endl; } os << std::endl << "Recommendations" << std::endl; for (auto const& r : recommendations) { os << "* " << r << std::endl; } } } // remembers the last table settings used. When it changes, a new table header is automatically written for the new entry. uint64_t& singletonHeaderHash() noexcept { static uint64_t sHeaderHash{}; return sHeaderHash; } ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") inline uint64_t hash_combine(uint64_t seed, uint64_t val) { return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U)); } // determines resolution of the given clock. This is done by measuring multiple times and returning the minimum time difference. 
Clock::duration calcClockResolution(size_t numEvaluations) noexcept { auto bestDuration = Clock::duration::max(); Clock::time_point tBegin; Clock::time_point tEnd; for (size_t i = 0; i < numEvaluations; ++i) { tBegin = Clock::now(); do { tEnd = Clock::now(); } while (tBegin == tEnd); bestDuration = (std::min)(bestDuration, tEnd - tBegin); } return bestDuration; } // Calculates clock resolution once, and remembers the result Clock::duration clockResolution() noexcept { static Clock::duration const sResolution = calcClockResolution(20); return sResolution; } ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) struct IterationLogic::Impl { enum class State { warmup, upscaling_runtime, measuring, endless }; explicit Impl(Bench const& bench) : mBench(bench) , mResult(bench.config()) { printStabilityInformationOnce(mBench.output()); // determine target runtime per epoch mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple(); if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) { mTargetRuntimePerEpoch = mBench.maxEpochTime(); } if (mTargetRuntimePerEpoch < mBench.minEpochTime()) { mTargetRuntimePerEpoch = mBench.minEpochTime(); } if (isEndlessRunning(mBench.name())) { std::cerr << "NANOBENCH_ENDLESS set: running '" << mBench.name() << "' endlessly" << std::endl; mNumIters = (std::numeric_limits<uint64_t>::max)(); mState = State::endless; } else if (0 != mBench.warmup()) { mNumIters = mBench.warmup(); mState = State::warmup; } else if (0 != mBench.epochIterations()) { // exact number of iterations mNumIters = mBench.epochIterations(); mState = State::measuring; } else { mNumIters = mBench.minEpochIterations(); mState = State::upscaling_runtime; } } // directly calculates new iters based on elapsed&iters, and adds a 10% noise. Makes sure we don't underflow. ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters) noexcept { auto doubleElapsed = d(elapsed); auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch); auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters); auto doubleMinEpochIters = d(mBench.minEpochIterations()); if (doubleNewIters < doubleMinEpochIters) { doubleNewIters = doubleMinEpochIters; } doubleNewIters *= 1.0 + 0.2 * mRng.uniform01(); // +0.5 for correct rounding when casting // NOLINTNEXTLINE(bugprone-incorrect-roundings) return static_cast<uint64_t>(doubleNewIters + 0.5); } ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") void upscale(std::chrono::nanoseconds elapsed) { if (elapsed * 10 < mTargetRuntimePerEpoch) { // we are far below the target runtime. Multiply iterations by 10 (with overflow check) if (mNumIters * 10 < mNumIters) { // overflow :-( showResult("iterations overflow. Maybe your code got optimized away?"); mNumIters = 0; return; } mNumIters *= 10; } else { mNumIters = calcBestNumIters(elapsed, mNumIters); } } void add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept { # if defined(ANKERL_NANOBENCH_LOG_ENABLED) auto oldIters = mNumIters; # endif switch (mState) { case State::warmup: if (isCloseEnoughForMeasurements(elapsed)) { // if elapsed is close enough, we can skip upscaling and go right to measurements // still, we don't add the result to the measurements. 
mState = State::measuring; mNumIters = calcBestNumIters(elapsed, mNumIters); } else { // not close enough: switch to upscaling mState = State::upscaling_runtime; upscale(elapsed); } break; case State::upscaling_runtime: if (isCloseEnoughForMeasurements(elapsed)) { // if we are close enough, add measurement and switch to always measuring mState = State::measuring; mTotalElapsed += elapsed; mTotalNumIters += mNumIters; mResult.add(elapsed, mNumIters, pc); mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters); } else { upscale(elapsed); } break; case State::measuring: // just add measurements - no questions asked. Even when runtime is low. But we can't ignore // that fluctuation, or else we would bias the result mTotalElapsed += elapsed; mTotalNumIters += mNumIters; mResult.add(elapsed, mNumIters, pc); if (0 != mBench.epochIterations()) { mNumIters = mBench.epochIterations(); } else { mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters); } break; case State::endless: mNumIters = (std::numeric_limits<uint64_t>::max)(); break; } if (static_cast<uint64_t>(mResult.size()) == mBench.epochs()) { // we got all the results that we need, finish it showResult(""); mNumIters = 0; } ANKERL_NANOBENCH_LOG(mBench.name() << ": " << detail::fmt::Number(20, 3, d(elapsed.count())) << " elapsed, " << detail::fmt::Number(20, 3, d(mTargetRuntimePerEpoch.count())) << " target. oldIters=" << oldIters << ", mNumIters=" << mNumIters << ", mState=" << static_cast<int>(mState)); } // NOLINTNEXTLINE(readability-function-cognitive-complexity) void showResult(std::string const& errorMessage) const { ANKERL_NANOBENCH_LOG(errorMessage); if (mBench.output() != nullptr) { // prepare column data /////// std::vector<fmt::MarkDownColumn> columns; auto rMedian = mResult.median(Result::Measure::elapsed); if (mBench.relative()) { double d = 100.0; if (!mBench.results().empty()) { d = rMedian <= 0.0 ? 0.0 : mBench.results().front().median(Result::Measure::elapsed) / rMedian * 100.0; } columns.emplace_back(11, 1, "relative", "%", d); } if (mBench.complexityN() > 0) { columns.emplace_back(14, 0, "complexityN", "", mBench.complexityN()); } columns.emplace_back(22, 2, mBench.timeUnitName() + "/" + mBench.unit(), "", rMedian / (mBench.timeUnit().count() * mBench.batch())); columns.emplace_back(22, 2, mBench.unit() + "/s", "", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian); double const rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed); columns.emplace_back(10, 1, "err%", "%", rErrorMedian * 100.0); double rInsMedian = -1.0; if (mBench.performanceCounters() && mResult.has(Result::Measure::instructions)) { rInsMedian = mResult.median(Result::Measure::instructions); columns.emplace_back(18, 2, "ins/" + mBench.unit(), "", rInsMedian / mBench.batch()); } double rCycMedian = -1.0; if (mBench.performanceCounters() && mResult.has(Result::Measure::cpucycles)) { rCycMedian = mResult.median(Result::Measure::cpucycles); columns.emplace_back(18, 2, "cyc/" + mBench.unit(), "", rCycMedian / mBench.batch()); } if (rInsMedian > 0.0 && rCycMedian > 0.0) { columns.emplace_back(9, 3, "IPC", "", rCycMedian <= 0.0 ? 
0.0 : rInsMedian / rCycMedian); } if (mBench.performanceCounters() && mResult.has(Result::Measure::branchinstructions)) { double const rBraMedian = mResult.median(Result::Measure::branchinstructions); columns.emplace_back(17, 2, "bra/" + mBench.unit(), "", rBraMedian / mBench.batch()); if (mResult.has(Result::Measure::branchmisses)) { double p = 0.0; if (rBraMedian >= 1e-9) { p = 100.0 * mResult.median(Result::Measure::branchmisses) / rBraMedian; } columns.emplace_back(10, 1, "miss%", "%", p); } } columns.emplace_back(12, 2, "total", "", mResult.sumProduct(Result::Measure::iterations, Result::Measure::elapsed)); // write everything auto& os = *mBench.output(); // combine all elements that are relevant for printing the header uint64_t hash = 0; hash = hash_combine(std::hash<std::string>{}(mBench.unit()), hash); hash = hash_combine(std::hash<std::string>{}(mBench.title()), hash); hash = hash_combine(std::hash<std::string>{}(mBench.timeUnitName()), hash); hash = hash_combine(std::hash<double>{}(mBench.timeUnit().count()), hash); hash = hash_combine(std::hash<bool>{}(mBench.relative()), hash); hash = hash_combine(std::hash<bool>{}(mBench.performanceCounters()), hash); if (hash != singletonHeaderHash()) { singletonHeaderHash() = hash; // no result yet, print header os << std::endl; for (auto const& col : columns) { os << col.title(); } os << "| " << mBench.title() << std::endl; for (auto const& col : columns) { os << col.separator(); } os << "|:" << std::string(mBench.title().size() + 1U, '-') << std::endl; } if (!errorMessage.empty()) { for (auto const& col : columns) { os << col.invalid(); } os << "| :boom: " << fmt::MarkDownCode(mBench.name()) << " (" << errorMessage << ')' << std::endl; } else { for (auto const& col : columns) { os << col.value(); } os << "| "; auto showUnstable = isWarningsEnabled() && rErrorMedian >= 0.05; if (showUnstable) { os << ":wavy_dash: "; } os << fmt::MarkDownCode(mBench.name()); if (showUnstable) { auto avgIters = d(mTotalNumIters) / d(mBench.epochs()); // NOLINTNEXTLINE(bugprone-incorrect-roundings) auto suggestedIters = static_cast<uint64_t>(avgIters * 10 + 0.5); os << " (Unstable with ~" << detail::fmt::Number(1, 1, avgIters) << " iters. Increase `minEpochIterations` to e.g. 
" << suggestedIters << ")"; } os << std::endl; } } } ANKERL_NANOBENCH(NODISCARD) bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed) const noexcept { return elapsed * 3 >= mTargetRuntimePerEpoch * 2; } uint64_t mNumIters = 1; // NOLINT(misc-non-private-member-variables-in-classes) Bench const& mBench; // NOLINT(misc-non-private-member-variables-in-classes) std::chrono::nanoseconds mTargetRuntimePerEpoch{}; // NOLINT(misc-non-private-member-variables-in-classes) Result mResult; // NOLINT(misc-non-private-member-variables-in-classes) Rng mRng{123}; // NOLINT(misc-non-private-member-variables-in-classes) std::chrono::nanoseconds mTotalElapsed{}; // NOLINT(misc-non-private-member-variables-in-classes) uint64_t mTotalNumIters = 0; // NOLINT(misc-non-private-member-variables-in-classes) State mState = State::upscaling_runtime; // NOLINT(misc-non-private-member-variables-in-classes) }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) IterationLogic::IterationLogic(Bench const& bench) : mPimpl(new Impl(bench)) {} IterationLogic::~IterationLogic() { delete mPimpl; } uint64_t IterationLogic::numIters() const noexcept { ANKERL_NANOBENCH_LOG(mPimpl->mBench.name() << ": mNumIters=" << mPimpl->mNumIters); return mPimpl->mNumIters; } void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters const& pc) noexcept { mPimpl->add(elapsed, pc); } void IterationLogic::moveResultTo(std::vector<Result>& results) noexcept { results.emplace_back(std::move(mPimpl->mResult)); } # if ANKERL_NANOBENCH(PERF_COUNTERS) ANKERL_NANOBENCH(IGNORE_PADDED_PUSH) class LinuxPerformanceCounters { public: struct Target { Target(uint64_t* targetValue_, bool correctMeasuringOverhead_, bool correctLoopOverhead_) : targetValue(targetValue_) , correctMeasuringOverhead(correctMeasuringOverhead_) , correctLoopOverhead(correctLoopOverhead_) {} uint64_t* targetValue{}; // NOLINT(misc-non-private-member-variables-in-classes) bool correctMeasuringOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes) bool correctLoopOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes) }; LinuxPerformanceCounters() = default; LinuxPerformanceCounters(LinuxPerformanceCounters const&) = delete; LinuxPerformanceCounters(LinuxPerformanceCounters&&) = delete; LinuxPerformanceCounters& operator=(LinuxPerformanceCounters const&) = delete; LinuxPerformanceCounters& operator=(LinuxPerformanceCounters&&) = delete; ~LinuxPerformanceCounters(); // quick operation inline void start() {} inline void stop() {} bool monitor(perf_sw_ids swId, Target target); bool monitor(perf_hw_id hwId, Target target); ANKERL_NANOBENCH(NODISCARD) bool hasError() const noexcept { return mHasError; } // Just reading data is faster than enable & disabling. // we subtract data ourselves. 
inline void beginMeasure() { if (mHasError) { return; } // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP); if (mHasError) { return; } // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP); } inline void endMeasure() { if (mHasError) { return; } // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP)); if (mHasError) { return; } auto const numBytes = sizeof(uint64_t) * mCounters.size(); auto ret = read(mFd, mCounters.data(), numBytes); mHasError = ret != static_cast<ssize_t>(numBytes); } void updateResults(uint64_t numIters); // rounded integer division template <typename T> static inline T divRounded(T a, T divisor) { return (a + divisor / 2) / divisor; } ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") static inline uint32_t mix(uint32_t x) noexcept { x ^= x << 13U; x ^= x >> 17U; x ^= x << 5U; return x; } template <typename Op> ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") void calibrate(Op&& op) { // clear current calibration data, for (auto& v : mCalibratedOverhead) { v = UINT64_C(0); } // create new calibration data auto newCalibration = mCalibratedOverhead; for (auto& v : newCalibration) { v = (std::numeric_limits<uint64_t>::max)(); } for (size_t iter = 0; iter < 100; ++iter) { beginMeasure(); op(); endMeasure(); if (mHasError) { return; } for (size_t i = 0; i < newCalibration.size(); ++i) { auto diff = mCounters[i]; if (newCalibration[i] > diff) { newCalibration[i] = diff; } } } mCalibratedOverhead = std::move(newCalibration); { // calibrate loop overhead. For branches & instructions this makes sense, not so much for everything else like cycles. // marsaglia's xorshift: mov, sal/shr, xor. Times 3. // This has the nice property that the compiler doesn't seem to be able to optimize multiple calls any further. // see https://godbolt.org/z/49RVQ5 uint64_t const numIters = 100000U + (std::random_device{}() & 3U); uint64_t n = numIters; uint32_t x = 1234567; beginMeasure(); while (n-- > 0) { x = mix(x); } endMeasure(); detail::doNotOptimizeAway(x); auto measure1 = mCounters; n = numIters; beginMeasure(); while (n-- > 0) { // we now run *twice* so we can easily calculate the overhead x = mix(x); x = mix(x); } endMeasure(); detail::doNotOptimizeAway(x); auto measure2 = mCounters; for (size_t i = 0; i < mCounters.size(); ++i) { // factor 2 because we have two instructions per loop auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0; auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0; auto overhead = m1 * 2 > m2 ? 
m1 * 2 - m2 : 0; mLoopOverhead[i] = divRounded(overhead, numIters); } } } private: bool monitor(uint32_t type, uint64_t eventid, Target target); std::map<uint64_t, Target> mIdToTarget{}; // start with minimum size of 3 for read_format std::vector<uint64_t> mCounters{3}; std::vector<uint64_t> mCalibratedOverhead{3}; std::vector<uint64_t> mLoopOverhead{3}; uint64_t mTimeEnabledNanos = 0; uint64_t mTimeRunningNanos = 0; int mFd = -1; bool mHasError = false; }; ANKERL_NANOBENCH(IGNORE_PADDED_POP) LinuxPerformanceCounters::~LinuxPerformanceCounters() { if (-1 != mFd) { close(mFd); } } bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) { return monitor(PERF_TYPE_SOFTWARE, swId, target); } bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) { return monitor(PERF_TYPE_HARDWARE, hwId, target); } // overflow is ok, it's checked ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") void LinuxPerformanceCounters::updateResults(uint64_t numIters) { // clear old data for (auto& id_value : mIdToTarget) { *id_value.second.targetValue = UINT64_C(0); } if (mHasError) { return; } mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1]; mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2]; for (uint64_t i = 0; i < mCounters[0]; ++i) { auto idx = static_cast<size_t>(3 + i * 2 + 0); auto id = mCounters[idx + 1U]; auto it = mIdToTarget.find(id); if (it != mIdToTarget.end()) { auto& tgt = it->second; *tgt.targetValue = mCounters[idx]; if (tgt.correctMeasuringOverhead) { if (*tgt.targetValue >= mCalibratedOverhead[idx]) { *tgt.targetValue -= mCalibratedOverhead[idx]; } else { *tgt.targetValue = 0U; } } if (tgt.correctLoopOverhead) { auto correctionVal = mLoopOverhead[idx] * numIters; if (*tgt.targetValue >= correctionVal) { *tgt.targetValue -= correctionVal; } else { *tgt.targetValue = 0U; } } } } } bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) { *target.targetValue = (std::numeric_limits<uint64_t>::max)(); if (mHasError) { return false; } auto pea = perf_event_attr(); std::memset(&pea, 0, sizeof(perf_event_attr)); pea.type = type; pea.size = sizeof(perf_event_attr); pea.config = eventid; pea.disabled = 1; // start counter as disabled pea.exclude_kernel = 1; pea.exclude_hv = 1; // NOLINTNEXTLINE(hicpp-signed-bitwise) pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; const int pid = 0; // the current process const int cpu = -1; // all CPUs # if defined(PERF_FLAG_FD_CLOEXEC) // since Linux 3.14 const unsigned long flags = PERF_FLAG_FD_CLOEXEC; # else const unsigned long flags = 0; # endif // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg) auto fd = static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd, flags)); if (-1 == fd) { return false; } if (-1 == mFd) { // first call: set to fd, and use this from now on mFd = fd; } uint64_t id = 0; // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg) if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &id)) { // couldn't get id return false; } // insert into map, rely on the fact that map's references are constant. 
mIdToTarget.emplace(id, target); // prepare readformat with the correct size (after the insert) auto size = 3 + 2 * mIdToTarget.size(); mCounters.resize(size); mCalibratedOverhead.resize(size); mLoopOverhead.resize(size); return true; } PerformanceCounters::PerformanceCounters() : mPc(new LinuxPerformanceCounters()) , mVal() , mHas() { // HW events mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles, true, false)); if (!mHas.cpuCycles) { // Fallback to cycles counter, reference cycles not available in many systems. mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles, true, false)); } mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions, true, true)); mHas.branchInstructions = mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions, true, false)); mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses, true, false)); // mHas.branchMisses = false; // SW events mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults, true, false)); mHas.contextSwitches = mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches, true, false)); mPc->start(); mPc->calibrate([] { auto before = ankerl::nanobench::Clock::now(); auto after = ankerl::nanobench::Clock::now(); (void)before; (void)after; }); if (mPc->hasError()) { // something failed, don't monitor anything. mHas = PerfCountSet<bool>{}; } } PerformanceCounters::~PerformanceCounters() { // no need to check for nullptr, delete nullptr has no effect delete mPc; } void PerformanceCounters::beginMeasure() { mPc->beginMeasure(); } void PerformanceCounters::endMeasure() { mPc->endMeasure(); } void PerformanceCounters::updateResults(uint64_t numIters) { mPc->updateResults(numIters); } # else PerformanceCounters::PerformanceCounters() = default; PerformanceCounters::~PerformanceCounters() = default; void PerformanceCounters::beginMeasure() {} void PerformanceCounters::endMeasure() {} void PerformanceCounters::updateResults(uint64_t) {} # endif ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t> const& PerformanceCounters::val() const noexcept { return mVal; } ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool> const& PerformanceCounters::has() const noexcept { return mHas; } // formatting utilities namespace fmt { // adds thousands separator to numbers NumSep::NumSep(char sep) : mSep(sep) {} char NumSep::do_thousands_sep() const { return mSep; } std::string NumSep::do_grouping() const { return "\003"; } // RAII to save & restore a stream's state StreamStateRestorer::StreamStateRestorer(std::ostream& s) : mStream(s) , mLocale(s.getloc()) , mPrecision(s.precision()) , mWidth(s.width()) , mFill(s.fill()) , mFmtFlags(s.flags()) {} StreamStateRestorer::~StreamStateRestorer() { restore(); } // sets back all stream info that we remembered at construction void StreamStateRestorer::restore() { mStream.imbue(mLocale); mStream.precision(mPrecision); mStream.width(mWidth); mStream.fill(mFill); mStream.flags(mFmtFlags); } Number::Number(int width, int precision, int64_t value) : mWidth(width) , mPrecision(precision) , mValue(d(value)) {} Number::Number(int width, int precision, double value) : mWidth(width) , mPrecision(precision) , mValue(value) {} std::ostream& Number::write(std::ostream& os) const { 
StreamStateRestorer const restorer(os); os.imbue(std::locale(os.getloc(), new NumSep(','))); os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue; return os; } std::string Number::to_s() const { std::stringstream ss; write(ss); return ss.str(); } std::string to_s(uint64_t n) { std::string str; do { str += static_cast<char>('0' + static_cast<char>(n % 10)); n /= 10; } while (n != 0); std::reverse(str.begin(), str.end()); return str; } std::ostream& operator<<(std::ostream& os, Number const& n) { return n.write(os); } MarkDownColumn::MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val) noexcept : mWidth(w) , mPrecision(prec) , mTitle(std::move(tit)) , mSuffix(std::move(suff)) , mValue(val) {} std::string MarkDownColumn::title() const { std::stringstream ss; ss << '|' << std::setw(mWidth - 2) << std::right << mTitle << ' '; return ss.str(); } std::string MarkDownColumn::separator() const { std::string sep(static_cast<size_t>(mWidth), '-'); sep.front() = '|'; sep.back() = ':'; return sep; } std::string MarkDownColumn::invalid() const { std::string sep(static_cast<size_t>(mWidth), ' '); sep.front() = '|'; sep[sep.size() - 2] = '-'; return sep; } std::string MarkDownColumn::value() const { std::stringstream ss; auto width = mWidth - 2 - static_cast<int>(mSuffix.size()); ss << '|' << Number(width, mPrecision, mValue) << mSuffix << ' '; return ss.str(); } // Formats any text as markdown code, escaping backticks. MarkDownCode::MarkDownCode(std::string const& what) { mWhat.reserve(what.size() + 2); mWhat.push_back('`'); for (char const c : what) { mWhat.push_back(c); if ('`' == c) { mWhat.push_back('`'); } } mWhat.push_back('`'); } std::ostream& MarkDownCode::write(std::ostream& os) const { return os << mWhat; } std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode) { return mdCode.write(os); } } // namespace fmt } // namespace detail // provide implementation here so it's only generated once Config::Config() = default; Config::~Config() = default; Config& Config::operator=(Config const&) = default; Config& Config::operator=(Config&&) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default; Config::Config(Config const&) = default; Config::Config(Config&&) noexcept = default; // provide implementation here so it's only generated once Result::~Result() = default; Result& Result::operator=(Result const&) = default; Result& Result::operator=(Result&&) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default; Result::Result(Result const&) = default; Result::Result(Result&&) noexcept = default; namespace detail { template <typename T> inline constexpr typename std::underlying_type<T>::type u(T val) noexcept { return static_cast<typename std::underlying_type<T>::type>(val); } } // namespace detail // Result returned after a benchmark has finished. Can be used as a baseline for relative(). 
Result::Result(Config benchmarkConfig) : mConfig(std::move(benchmarkConfig)) , mNameToMeasurements{detail::u(Result::Measure::_size)} {} void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc) { using detail::d; using detail::u; double const dIters = d(iters); mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters); mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters); if (pc.has().pageFaults) { mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters); } if (pc.has().cpuCycles) { mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters); } if (pc.has().contextSwitches) { mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters); } if (pc.has().instructions) { mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters); } if (pc.has().branchInstructions) { double branchInstructions = 0.0; // correcting branches: remove branch introduced by the while (...) loop for each iteration. if (pc.val().branchInstructions > iters + 1U) { branchInstructions = d(pc.val().branchInstructions - (iters + 1U)); } mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters); if (pc.has().branchMisses) { // correcting branch misses double branchMisses = d(pc.val().branchMisses); if (branchMisses > branchInstructions) { // can't have more branch misses than branches branchMisses = branchInstructions; } // assuming at least one missed branch for the loop branchMisses -= 1.0; if (branchMisses < 1.0) { branchMisses = 1.0; } mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters); } } } Config const& Result::config() const noexcept { return mConfig; } inline double calcMedian(std::vector<double>& data) { if (data.empty()) { return 0.0; } std::sort(data.begin(), data.end()); auto midIdx = data.size() / 2U; if (1U == (data.size() & 1U)) { return data[midIdx]; } return (data[midIdx - 1U] + data[midIdx]) / 2U; } double Result::median(Measure m) const { // create a copy so we can sort auto data = mNameToMeasurements[detail::u(m)]; return calcMedian(data); } double Result::average(Measure m) const { using detail::d; auto const& data = mNameToMeasurements[detail::u(m)]; if (data.empty()) { return 0.0; } // no sorting needed for the average return sum(m) / d(data.size()); } double Result::medianAbsolutePercentError(Measure m) const { // create copy auto data = mNameToMeasurements[detail::u(m)]; // calculates MdAPE which is the median of percentage error // see https://support.numxl.com/hc/en-us/articles/115001223503-MdAPE-Median-Absolute-Percentage-Error auto med = calcMedian(data); // transform the data to absolute percentage error for (auto& x : data) { x = (x - med) / x; if (x < 0) { x = -x; } } return calcMedian(data); } double Result::sum(Measure m) const noexcept { auto const& data = mNameToMeasurements[detail::u(m)]; return std::accumulate(data.begin(), data.end(), 0.0); } double Result::sumProduct(Measure m1, Measure m2) const noexcept { auto const& data1 = mNameToMeasurements[detail::u(m1)]; auto const& data2 = mNameToMeasurements[detail::u(m2)]; if (data1.size() != data2.size()) { return 0.0; } double result = 0.0; for (size_t i = 0, s = data1.size(); i != s; ++i) { result += data1[i] * data2[i]; } return result; } bool Result::has(Measure m) const noexcept { return
!mNameToMeasurements[detail::u(m)].empty(); } double Result::get(size_t idx, Measure m) const { auto const& data = mNameToMeasurements[detail::u(m)]; return data.at(idx); } bool Result::empty() const noexcept { return 0U == size(); } size_t Result::size() const noexcept { auto const& data = mNameToMeasurements[detail::u(Measure::elapsed)]; return data.size(); } double Result::minimum(Measure m) const noexcept { auto const& data = mNameToMeasurements[detail::u(m)]; if (data.empty()) { return 0.0; } // here it's safe to assume that at least one element is there return *std::min_element(data.begin(), data.end()); } double Result::maximum(Measure m) const noexcept { auto const& data = mNameToMeasurements[detail::u(m)]; if (data.empty()) { return 0.0; } // here it's safe to assume that at least one element is there return *std::max_element(data.begin(), data.end()); } std::string const& Result::context(char const* variableName) const { return mConfig.mContext.at(variableName); } std::string const& Result::context(std::string const& variableName) const { return mConfig.mContext.at(variableName); } Result::Measure Result::fromString(std::string const& str) { if (str == "elapsed") { return Measure::elapsed; } if (str == "iterations") { return Measure::iterations; } if (str == "pagefaults") { return Measure::pagefaults; } if (str == "cpucycles") { return Measure::cpucycles; } if (str == "contextswitches") { return Measure::contextswitches; } if (str == "instructions") { return Measure::instructions; } if (str == "branchinstructions") { return Measure::branchinstructions; } if (str == "branchmisses") { return Measure::branchmisses; } // not found, return _size return Measure::_size; } // Configuration of a microbenchmark. Bench::Bench() { mConfig.mOut = &std::cout; } Bench::Bench(Bench&&) noexcept = default; Bench& Bench::operator=(Bench&&) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default; Bench::Bench(Bench const&) = default; Bench& Bench::operator=(Bench const&) = default; Bench::~Bench() noexcept = default; double Bench::batch() const noexcept { return mConfig.mBatch; } double Bench::complexityN() const noexcept { return mConfig.mComplexityN; } // Set a baseline to compare against. At 100% it is exactly as fast as the baseline, >100% means it is faster than the baseline, <100% // means it is slower than the baseline. Bench& Bench::relative(bool isRelativeEnabled) noexcept { mConfig.mIsRelative = isRelativeEnabled; return *this; } bool Bench::relative() const noexcept { return mConfig.mIsRelative; } Bench& Bench::performanceCounters(bool showPerformanceCounters) noexcept { mConfig.mShowPerformanceCounters = showPerformanceCounters; return *this; } bool Bench::performanceCounters() const noexcept { return mConfig.mShowPerformanceCounters; } // Operation unit. Defaults to "op", could be e.g. "byte" for string processing. // If u differs from currently set unit, the stored results will be cleared. // Use singular (byte, not bytes).
Bench& Bench::unit(char const* u) { if (u != mConfig.mUnit) { mResults.clear(); } mConfig.mUnit = u; return *this; } Bench& Bench::unit(std::string const& u) { return unit(u.c_str()); } std::string const& Bench::unit() const noexcept { return mConfig.mUnit; } Bench& Bench::timeUnit(std::chrono::duration<double> const& tu, std::string const& tuName) { mConfig.mTimeUnit = tu; mConfig.mTimeUnitName = tuName; return *this; } std::string const& Bench::timeUnitName() const noexcept { return mConfig.mTimeUnitName; } std::chrono::duration<double> const& Bench::timeUnit() const noexcept { return mConfig.mTimeUnit; } // If benchmarkTitle differs from currently set title, the stored results will be cleared. Bench& Bench::title(const char* benchmarkTitle) { if (benchmarkTitle != mConfig.mBenchmarkTitle) { mResults.clear(); } mConfig.mBenchmarkTitle = benchmarkTitle; return *this; } Bench& Bench::title(std::string const& benchmarkTitle) { if (benchmarkTitle != mConfig.mBenchmarkTitle) { mResults.clear(); } mConfig.mBenchmarkTitle = benchmarkTitle; return *this; } std::string const& Bench::title() const noexcept { return mConfig.mBenchmarkTitle; } Bench& Bench::name(const char* benchmarkName) { mConfig.mBenchmarkName = benchmarkName; return *this; } Bench& Bench::name(std::string const& benchmarkName) { mConfig.mBenchmarkName = benchmarkName; return *this; } std::string const& Bench::name() const noexcept { return mConfig.mBenchmarkName; } Bench& Bench::context(char const* variableName, char const* variableValue) { mConfig.mContext[variableName] = variableValue; return *this; } Bench& Bench::context(std::string const& variableName, std::string const& variableValue) { mConfig.mContext[variableName] = variableValue; return *this; } Bench& Bench::clearContext() { mConfig.mContext.clear(); return *this; } // Number of epochs to evaluate. The reported result will be the median of evaluation of each epoch. Bench& Bench::epochs(size_t numEpochs) noexcept { mConfig.mNumEpochs = numEpochs; return *this; } size_t Bench::epochs() const noexcept { return mConfig.mNumEpochs; } // Desired evaluation time is a multiple of clock resolution. Default is to be 1000 times above this measurement precision. Bench& Bench::clockResolutionMultiple(size_t multiple) noexcept { mConfig.mClockResolutionMultiple = multiple; return *this; } size_t Bench::clockResolutionMultiple() const noexcept { return mConfig.mClockResolutionMultiple; } // Sets the maximum time each epoch should take. Default is 100ms. Bench& Bench::maxEpochTime(std::chrono::nanoseconds t) noexcept { mConfig.mMaxEpochTime = t; return *this; } std::chrono::nanoseconds Bench::maxEpochTime() const noexcept { return mConfig.mMaxEpochTime; } // Sets the minimum time each epoch should take. Bench& Bench::minEpochTime(std::chrono::nanoseconds t) noexcept { mConfig.mMinEpochTime = t; return *this; } std::chrono::nanoseconds Bench::minEpochTime() const noexcept { return mConfig.mMinEpochTime; } Bench& Bench::minEpochIterations(uint64_t numIters) noexcept { mConfig.mMinEpochIterations = (numIters == 0) ?
1 : numIters; return *this; } uint64_t Bench::minEpochIterations() const noexcept { return mConfig.mMinEpochIterations; } Bench& Bench::epochIterations(uint64_t numIters) noexcept { mConfig.mEpochIterations = numIters; return *this; } uint64_t Bench::epochIterations() const noexcept { return mConfig.mEpochIterations; } Bench& Bench::warmup(uint64_t numWarmupIters) noexcept { mConfig.mWarmup = numWarmupIters; return *this; } uint64_t Bench::warmup() const noexcept { return mConfig.mWarmup; } Bench& Bench::config(Config const& benchmarkConfig) { mConfig = benchmarkConfig; return *this; } Config const& Bench::config() const noexcept { return mConfig; } Bench& Bench::output(std::ostream* outstream) noexcept { mConfig.mOut = outstream; return *this; } ANKERL_NANOBENCH(NODISCARD) std::ostream* Bench::output() const noexcept { return mConfig.mOut; } std::vector<Result> const& Bench::results() const noexcept { return mResults; } Bench& Bench::render(char const* templateContent, std::ostream& os) { ::ankerl::nanobench::render(templateContent, *this, os); return *this; } Bench& Bench::render(std::string const& templateContent, std::ostream& os) { ::ankerl::nanobench::render(templateContent, *this, os); return *this; } std::vector<BigO> Bench::complexityBigO() const { std::vector<BigO> bigOs; auto rangeMeasure = BigO::collectRangeMeasure(mResults); bigOs.emplace_back("O(1)", rangeMeasure, [](double) { return 1.0; }); bigOs.emplace_back("O(n)", rangeMeasure, [](double n) { return n; }); bigOs.emplace_back("O(log n)", rangeMeasure, [](double n) { return std::log2(n); }); bigOs.emplace_back("O(n log n)", rangeMeasure, [](double n) { return n * std::log2(n); }); bigOs.emplace_back("O(n^2)", rangeMeasure, [](double n) { return n * n; }); bigOs.emplace_back("O(n^3)", rangeMeasure, [](double n) { return n * n * n; }); std::sort(bigOs.begin(), bigOs.end()); return bigOs; } Rng::Rng() : mX(0) , mY(0) { std::random_device rd; std::uniform_int_distribution<uint64_t> dist; do { mX = dist(rd); mY = dist(rd); } while (mX == 0 && mY == 0); } ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined") uint64_t splitMix64(uint64_t& state) noexcept { uint64_t z = (state += UINT64_C(0x9e3779b97f4a7c15)); z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9); z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb); return z ^ (z >> 31U); } // Seeded as described in romu paper (update april 2020) Rng::Rng(uint64_t seed) noexcept : mX(splitMix64(seed)) , mY(splitMix64(seed)) { for (size_t i = 0; i < 10; ++i) { operator()(); } } // only internally used to copy the RNG. 
Rng::Rng(uint64_t x, uint64_t y) noexcept : mX(x) , mY(y) {} Rng Rng::copy() const noexcept { return Rng{mX, mY}; } Rng::Rng(std::vector<uint64_t> const& data) : mX(0) , mY(0) { if (data.size() != 2) { throw std::runtime_error("ankerl::nanobench::Rng::Rng: needed exactly 2 entries in data, but got " + detail::fmt::to_s(data.size())); } mX = data[0]; mY = data[1]; } std::vector<uint64_t> Rng::state() const { std::vector<uint64_t> data(2); data[0] = mX; data[1] = mY; return data; } BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result> const& results) { BigO::RangeMeasure rangeMeasure; for (auto const& result : results) { if (result.config().mComplexityN > 0.0) { rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed)); } } return rangeMeasure; } BigO::BigO(std::string bigOName, RangeMeasure const& rangeMeasure) : mName(std::move(bigOName)) { // estimate the constant factor double sumRangeMeasure = 0.0; double sumRangeRange = 0.0; for (const auto& rm : rangeMeasure) { sumRangeMeasure += rm.first * rm.second; sumRangeRange += rm.first * rm.first; } mConstant = sumRangeMeasure / sumRangeRange; // calculate root mean square double err = 0.0; double sumMeasure = 0.0; for (const auto& rm : rangeMeasure) { auto diff = mConstant * rm.first - rm.second; err += diff * diff; sumMeasure += rm.second; } auto n = detail::d(rangeMeasure.size()); auto mean = sumMeasure / n; mNormalizedRootMeanSquare = std::sqrt(err / n) / mean; } BigO::BigO(const char* bigOName, RangeMeasure const& rangeMeasure) : BigO(std::string(bigOName), rangeMeasure) {} std::string const& BigO::name() const noexcept { return mName; } double BigO::constant() const noexcept { return mConstant; } double BigO::normalizedRootMeanSquare() const noexcept { return mNormalizedRootMeanSquare; } bool BigO::operator<(BigO const& other) const noexcept { return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName); } std::ostream& operator<<(std::ostream& os, BigO const& bigO) { return os << bigO.constant() << " * " << bigO.name() << ", rms=" << bigO.normalizedRootMeanSquare(); } std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs) { detail::fmt::StreamStateRestorer const restorer(os); os << std::endl << "| coefficient | err% | complexity" << std::endl << "|--------------:|-------:|------------" << std::endl; for (auto const& bigO : bigOs) { os << "|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() << " "; os << "|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) << "% "; os << "| " << bigO.name(); os << std::endl; } return os; } } // namespace nanobench } // namespace ankerl #endif // ANKERL_NANOBENCH_IMPLEMENT #endif // ANKERL_NANOBENCH_H_INCLUDED
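// Usage sketch (illustrative only, not part of the header): the tag commands
// handled above in generateResultTag() (median, average,
// medianAbsolutePercentError, sum, minimum, maximum, sumProduct, context)
// can be combined into a custom mustache-style render template, e.g.:
//
//     ankerl::nanobench::Bench bench;
//     bench.title("demo").unit("op").run("my op", [] { /* code under test */ });
//     bench.render("{{#result}}{{name}};{{median(elapsed)}};"
//                  "{{medianAbsolutePercentError(elapsed)}}\n{{/result}}",
//                  std::cout);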
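// Sketch of the asymptotic-complexity workflow implemented by BigO above
// (values are illustrative): run the benchmark for several complexityN sizes,
// then complexityBigO() least-squares-fits each candidate curve against the
// (complexityN, median elapsed) pairs and sorts by normalized root mean
// square error, best fit first:
//
//     ankerl::nanobench::Bench bench;
//     for (auto n : {100U, 200U, 400U, 800U}) {
//         bench.complexityN(n).run("op", [&] { /* work that scales with n */ });
//     }
//     std::cout << bench.complexityBigO() << std::endl;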
0
bitcoin/src
bitcoin/src/bench/mempool_stress.cpp
// Copyright (c) 2011-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <kernel/mempool_entry.h> #include <policy/policy.h> #include <random.h> #include <test/util/setup_common.h> #include <txmempool.h> #include <util/chaintype.h> #include <validation.h> #include <vector> static void AddTx(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { int64_t nTime = 0; unsigned int nHeight = 1; uint64_t sequence = 0; bool spendsCoinbase = false; unsigned int sigOpCost = 4; LockPoints lp; pool.addUnchecked(CTxMemPoolEntry(tx, 1000, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); } struct Available { CTransactionRef ref; size_t vin_left{0}; size_t tx_count; Available(CTransactionRef& ref, size_t tx_count) : ref(ref), tx_count(tx_count){} }; static std::vector<CTransactionRef> CreateOrderedCoins(FastRandomContext& det_rand, int childTxs, int min_ancestors) { std::vector<Available> available_coins; std::vector<CTransactionRef> ordered_coins; // Create some base transactions size_t tx_counter = 1; for (auto x = 0; x < 100; ++x) { CMutableTransaction tx = CMutableTransaction(); tx.vin.resize(1); tx.vin[0].scriptSig = CScript() << CScriptNum(tx_counter); tx.vin[0].scriptWitness.stack.push_back(CScriptNum(x).getvch()); tx.vout.resize(det_rand.randrange(10)+2); for (auto& out : tx.vout) { out.scriptPubKey = CScript() << CScriptNum(tx_counter) << OP_EQUAL; out.nValue = 10 * COIN; } ordered_coins.emplace_back(MakeTransactionRef(tx)); available_coins.emplace_back(ordered_coins.back(), tx_counter++); } for (auto x = 0; x < childTxs && !available_coins.empty(); ++x) { CMutableTransaction tx = CMutableTransaction(); size_t n_ancestors = det_rand.randrange(10)+1; for (size_t ancestor = 0; ancestor < n_ancestors && !available_coins.empty(); ++ancestor) { size_t idx = det_rand.randrange(available_coins.size()); Available& coin = available_coins[idx]; // take a reference: the vin_left updates and the swap-remove below must affect the stored entry Txid hash = coin.ref->GetHash(); // biased towards taking min_ancestors parents, but maybe more size_t n_to_take = det_rand.randrange(2) == 0 ?
min_ancestors : min_ancestors + det_rand.randrange(coin.ref->vout.size() - coin.vin_left); for (size_t i = 0; i < n_to_take; ++i) { tx.vin.emplace_back(); tx.vin.back().prevout = COutPoint(hash, coin.vin_left++); tx.vin.back().scriptSig = CScript() << coin.tx_count; tx.vin.back().scriptWitness.stack.push_back(CScriptNum(coin.tx_count).getvch()); } if (coin.vin_left == coin.ref->vin.size()) { coin = available_coins.back(); available_coins.pop_back(); } tx.vout.resize(det_rand.randrange(10)+2); for (auto& out : tx.vout) { out.scriptPubKey = CScript() << CScriptNum(tx_counter) << OP_EQUAL; out.nValue = 10 * COIN; } } ordered_coins.emplace_back(MakeTransactionRef(tx)); available_coins.emplace_back(ordered_coins.back(), tx_counter++); } return ordered_coins; } static void ComplexMemPool(benchmark::Bench& bench) { FastRandomContext det_rand{true}; int childTxs = 800; if (bench.complexityN() > 1) { childTxs = static_cast<int>(bench.complexityN()); } std::vector<CTransactionRef> ordered_coins = CreateOrderedCoins(det_rand, childTxs, /*min_ancestors=*/1); const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(ChainType::MAIN); CTxMemPool& pool = *testing_setup.get()->m_node.mempool; LOCK2(cs_main, pool.cs); bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { for (auto& tx : ordered_coins) { AddTx(tx, pool); } pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); pool.TrimToSize(GetVirtualTransactionSize(*ordered_coins.front())); }); } static void MempoolCheck(benchmark::Bench& bench) { FastRandomContext det_rand{true}; auto testing_setup = MakeNoLogFileContext<TestChain100Setup>(ChainType::REGTEST, {"-checkmempool=1"}); CTxMemPool& pool = *testing_setup.get()->m_node.mempool; LOCK2(cs_main, pool.cs); testing_setup->PopulateMempool(det_rand, 400, true); const CCoinsViewCache& coins_tip = testing_setup.get()->m_node.chainman->ActiveChainstate().CoinsTip(); bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { // Bump up the spendheight so we don't hit premature coinbase spend errors. pool.check(coins_tip, /*spendheight=*/300); }); } BENCHMARK(ComplexMemPool, benchmark::PriorityLevel::HIGH); BENCHMARK(MempoolCheck, benchmark::PriorityLevel::HIGH);
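// Illustrative sketch (hypothetical helper, not used above): the positional
// CTxMemPoolEntry arguments passed by AddTx(), spelled out with the named
// argument comments used elsewhere in bench/ so the constants are easier to
// audit.
[[maybe_unused]] static void AddTxVerbose(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs)
{
    LockPoints lp;
    pool.addUnchecked(CTxMemPoolEntry(tx, /*fee=*/1000, /*time=*/0, /*entry_height=*/1,
                                      /*entry_sequence=*/0, /*spends_coinbase=*/false,
                                      /*sigops_cost=*/4, lp));
}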
0
bitcoin/src
bitcoin/src/bench/prevector.cpp
// Copyright (c) 2015-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <prevector.h> #include <serialize.h> #include <streams.h> #include <type_traits> #include <bench/bench.h> struct nontrivial_t { int x{-1}; nontrivial_t() = default; SERIALIZE_METHODS(nontrivial_t, obj) { READWRITE(obj.x); } }; static_assert(!std::is_trivially_default_constructible<nontrivial_t>::value, "expected nontrivial_t to not be trivially constructible"); typedef unsigned char trivial_t; static_assert(std::is_trivially_default_constructible<trivial_t>::value, "expected trivial_t to be trivially constructible"); template <typename T> static void PrevectorDestructor(benchmark::Bench& bench) { bench.batch(2).run([&] { prevector<28, T> t0; prevector<28, T> t1; t0.resize(28); t1.resize(29); }); } template <typename T> static void PrevectorClear(benchmark::Bench& bench) { prevector<28, T> t0; prevector<28, T> t1; bench.batch(2).run([&] { t0.resize(28); t0.clear(); t1.resize(29); t1.clear(); }); } template <typename T> static void PrevectorResize(benchmark::Bench& bench) { prevector<28, T> t0; prevector<28, T> t1; bench.batch(4).run([&] { t0.resize(28); t0.resize(0); t1.resize(29); t1.resize(0); }); } template <typename T> static void PrevectorDeserialize(benchmark::Bench& bench) { DataStream s0{}; prevector<28, T> t0; t0.resize(28); for (auto x = 0; x < 900; ++x) { s0 << t0; } t0.resize(100); for (auto x = 0; x < 101; ++x) { s0 << t0; } bench.batch(1000).run([&] { prevector<28, T> t1; for (auto x = 0; x < 1000; ++x) { s0 >> t1; } s0.Rewind(); }); } template <typename T> static void PrevectorFillVectorDirect(benchmark::Bench& bench) { bench.run([&] { std::vector<prevector<28, T>> vec; for (size_t i = 0; i < 260; ++i) { vec.emplace_back(); } }); } template <typename T> static void PrevectorFillVectorIndirect(benchmark::Bench& bench) { bench.run([&] { std::vector<prevector<28, T>> vec; for (size_t i = 0; i < 260; ++i) { // force allocation vec.emplace_back(29, T{}); } }); } #define PREVECTOR_TEST(name) \ static void Prevector##name##Nontrivial(benchmark::Bench& bench) \ { \ Prevector##name<nontrivial_t>(bench); \ } \ BENCHMARK(Prevector##name##Nontrivial, benchmark::PriorityLevel::HIGH); \ static void Prevector##name##Trivial(benchmark::Bench& bench) \ { \ Prevector##name<trivial_t>(bench); \ } \ BENCHMARK(Prevector##name##Trivial, benchmark::PriorityLevel::HIGH); PREVECTOR_TEST(Clear) PREVECTOR_TEST(Destructor) PREVECTOR_TEST(Resize) PREVECTOR_TEST(Deserialize) PREVECTOR_TEST(FillVectorDirect) PREVECTOR_TEST(FillVectorIndirect)
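// Note on batch(): each run() lambda above performs several prevector
// operations, and batch(N) tells nanobench to divide the measured time by N,
// so results are reported per operation. A minimal sketch (hypothetical
// benchmark, not registered via PREVECTOR_TEST):
[[maybe_unused]] static void PrevectorSwapExample(benchmark::Bench& bench)
{
    prevector<28, trivial_t> a;
    prevector<28, trivial_t> b;
    a.resize(28); // direct (inline-storage) prevector
    b.resize(29); // indirect (heap-backed) prevector
    bench.batch(2).run([&] {
        a.swap(b); // two swaps per invocation, hence batch(2)
        b.swap(a);
    });
}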
0
bitcoin/src
bitcoin/src/bench/checkblock.cpp
// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <bench/data.h> #include <chainparams.h> #include <common/args.h> #include <consensus/validation.h> #include <streams.h> #include <util/chaintype.h> #include <validation.h> // These are the two major time-sinks which happen after we have fully received // a block off the wire, but before we can relay the block on to peers using // compact block relay. static void DeserializeBlockTest(benchmark::Bench& bench) { DataStream stream(benchmark::data::block413567); std::byte a{0}; stream.write({&a, 1}); // Prevent compaction bench.unit("block").run([&] { CBlock block; stream >> TX_WITH_WITNESS(block); bool rewound = stream.Rewind(benchmark::data::block413567.size()); assert(rewound); }); } static void DeserializeAndCheckBlockTest(benchmark::Bench& bench) { DataStream stream(benchmark::data::block413567); std::byte a{0}; stream.write({&a, 1}); // Prevent compaction ArgsManager bench_args; const auto chainParams = CreateChainParams(bench_args, ChainType::MAIN); bench.unit("block").run([&] { CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here stream >> TX_WITH_WITNESS(block); bool rewound = stream.Rewind(benchmark::data::block413567.size()); assert(rewound); BlockValidationState validationState; bool checked = CheckBlock(block, validationState, chainParams->GetConsensus()); assert(checked); }); } BENCHMARK(DeserializeBlockTest, benchmark::PriorityLevel::HIGH); BENCHMARK(DeserializeAndCheckBlockTest, benchmark::PriorityLevel::HIGH);
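// Sketch of the consume-then-rewind pattern used above (illustrative, not a
// registered benchmark): each iteration deserializes from the stream, then
// Rewind() steps the read position back over exactly the consumed bytes so
// the next iteration re-reads identical input; the extra byte written up
// front keeps the stream from ever being fully drained (see "Prevent
// compaction" above).
[[maybe_unused]] static void RewindPatternExample()
{
    DataStream stream{};
    stream << uint32_t{413567}; // toy payload standing in for the block bytes
    uint32_t value;
    stream >> value; // consume the payload
    const bool rewound = stream.Rewind(sizeof(value)); // step back 4 bytes
    assert(rewound && value == 413567);
}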
0
bitcoin/src
bitcoin/src/bench/wallet_loading.cpp
// Copyright (c) 2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <interfaces/chain.h> #include <node/context.h> #include <test/util/mining.h> #include <test/util/setup_common.h> #include <wallet/test/util.h> #include <util/translation.h> #include <validationinterface.h> #include <wallet/context.h> #include <wallet/receive.h> #include <wallet/wallet.h> #include <optional> namespace wallet{ static void AddTx(CWallet& wallet) { CMutableTransaction mtx; mtx.vout.emplace_back(COIN, GetScriptForDestination(*Assert(wallet.GetNewDestination(OutputType::BECH32, "")))); mtx.vin.emplace_back(); wallet.AddToWallet(MakeTransactionRef(mtx), TxStateInactive{}); } static void WalletLoading(benchmark::Bench& bench, bool legacy_wallet) { const auto test_setup = MakeNoLogFileContext<TestingSetup>(); WalletContext context; context.args = &test_setup->m_args; context.chain = test_setup->m_node.chain.get(); // Setup the wallet // Loading the wallet will also create it uint64_t create_flags = 0; if (!legacy_wallet) { create_flags = WALLET_FLAG_DESCRIPTORS; } auto database = CreateMockableWalletDatabase(); auto wallet = TestLoadWallet(std::move(database), context, create_flags); // Generate a bunch of transactions and addresses to put into the wallet for (int i = 0; i < 1000; ++i) { AddTx(*wallet); } database = DuplicateMockDatabase(wallet->GetDatabase()); // reload the wallet for the actual benchmark TestUnloadWallet(std::move(wallet)); bench.epochs(5).run([&] { wallet = TestLoadWallet(std::move(database), context, create_flags); // Cleanup database = DuplicateMockDatabase(wallet->GetDatabase()); TestUnloadWallet(std::move(wallet)); }); } #ifdef USE_BDB static void WalletLoadingLegacy(benchmark::Bench& bench) { WalletLoading(bench, /*legacy_wallet=*/true); } BENCHMARK(WalletLoadingLegacy, benchmark::PriorityLevel::HIGH); #endif #ifdef USE_SQLITE static void WalletLoadingDescriptors(benchmark::Bench& bench) { WalletLoading(bench, /*legacy_wallet=*/false); } BENCHMARK(WalletLoadingDescriptors, benchmark::PriorityLevel::HIGH); #endif } // namespace wallet
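// Note: epochs(5) deliberately keeps the number of measured epochs small,
// since each epoch above performs a full load/unload cycle of a wallet with
// 1000 transactions. A sketch of bounding the runtime further with other
// nanobench knobs (illustrative values only):
//
//     bench.epochs(5).minEpochIterations(1).run([&] { /* load/unload */ });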
0
bitcoin/src
bitcoin/src/bench/rpc_mempool.cpp
// Copyright (c) 2011-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <kernel/cs_main.h> #include <kernel/mempool_entry.h> #include <rpc/mempool.h> #include <test/util/setup_common.h> #include <txmempool.h> #include <util/chaintype.h> #include <univalue.h> static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { LockPoints lp; pool.addUnchecked(CTxMemPoolEntry(tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); } static void RpcMempool(benchmark::Bench& bench) { const auto testing_setup = MakeNoLogFileContext<const ChainTestingSetup>(ChainType::MAIN); CTxMemPool& pool = *Assert(testing_setup->m_node.mempool); LOCK2(cs_main, pool.cs); for (int i = 0; i < 1000; ++i) { CMutableTransaction tx = CMutableTransaction(); tx.vin.resize(1); tx.vin[0].scriptSig = CScript() << OP_1; tx.vin[0].scriptWitness.stack.push_back({1}); tx.vout.resize(1); tx.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL; tx.vout[0].nValue = i; const CTransactionRef tx_r{MakeTransactionRef(tx)}; AddTx(tx_r, /*fee=*/i, pool); } bench.run([&] { (void)MempoolToJSON(pool, /*verbose=*/true); }); } BENCHMARK(RpcMempool, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/disconnected_transactions.cpp
// Copyright (c) 2023 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <bench/bench.h>
#include <kernel/disconnected_transactions.h>
#include <primitives/block.h>
#include <test/util/random.h>
#include <test/util/setup_common.h>

constexpr size_t BLOCK_VTX_COUNT{4000};
constexpr size_t BLOCK_VTX_COUNT_10PERCENT{400};

using BlockTxns = decltype(CBlock::vtx);

/** Reorg where 1 block is disconnected and 2 blocks are connected. */
struct ReorgTxns {
    /** Disconnected block. */
    BlockTxns disconnected_txns;
    /** First connected block. */
    BlockTxns connected_txns_1;
    /** Second connected block, new chain tip. Has no overlap with disconnected_txns. */
    BlockTxns connected_txns_2;
    /** Transactions shared between disconnected_txns and connected_txns_1. */
    size_t num_shared;
};

static BlockTxns CreateRandomTransactions(size_t num_txns)
{
    // Ensure every transaction has a different txid by having each one spend the previous one.
    static Txid prevout_hash{};

    BlockTxns txns;
    txns.reserve(num_txns);
    // Simplest spk for every tx
    CScript spk = CScript() << OP_TRUE;
    for (uint32_t i = 0; i < num_txns; ++i) {
        CMutableTransaction tx;
        tx.vin.emplace_back(COutPoint{prevout_hash, 0});
        tx.vout.emplace_back(CENT, spk);
        auto ptx{MakeTransactionRef(tx)};
        txns.emplace_back(ptx);
        prevout_hash = ptx->GetHash();
    }
    return txns;
}

/** Creates blocks for a Reorg, each with BLOCK_VTX_COUNT transactions. Between the disconnected
 * block and the first connected block, there will be num_not_shared transactions that are
 * different, and all other transactions the exact same. The second connected block has all unique
 * transactions. This is to simulate a reorg in which all but num_not_shared transactions are
 * confirmed in the new chain. */
static ReorgTxns CreateBlocks(size_t num_not_shared)
{
    auto num_shared{BLOCK_VTX_COUNT - num_not_shared};
    const auto shared_txns{CreateRandomTransactions(/*num_txns=*/num_shared)};

    // Create different sets of transactions...
    auto disconnected_block_txns{CreateRandomTransactions(/*num_txns=*/num_not_shared)};
    std::copy(shared_txns.begin(), shared_txns.end(), std::back_inserter(disconnected_block_txns));

    auto connected_block_txns{CreateRandomTransactions(/*num_txns=*/num_not_shared)};
    std::copy(shared_txns.begin(), shared_txns.end(), std::back_inserter(connected_block_txns));

    assert(disconnected_block_txns.size() == BLOCK_VTX_COUNT);
    assert(connected_block_txns.size() == BLOCK_VTX_COUNT);

    return ReorgTxns{/*disconnected_txns=*/disconnected_block_txns,
                     /*connected_txns_1=*/connected_block_txns,
                     /*connected_txns_2=*/CreateRandomTransactions(BLOCK_VTX_COUNT),
                     /*num_shared=*/num_shared};
}

static void Reorg(const ReorgTxns& reorg)
{
    DisconnectedBlockTransactions disconnectpool{MAX_DISCONNECTED_TX_POOL_BYTES};

    // Disconnect block
    const auto evicted = disconnectpool.AddTransactionsFromBlock(reorg.disconnected_txns);
    assert(evicted.empty());

    // Connect first block
    disconnectpool.removeForBlock(reorg.connected_txns_1);
    // Connect new tip
    disconnectpool.removeForBlock(reorg.connected_txns_2);

    // Sanity Check
    assert(disconnectpool.size() == BLOCK_VTX_COUNT - reorg.num_shared);

    disconnectpool.clear();
}

/** Add transactions from DisconnectedBlockTransactions, remove all but one (the disconnected
 * block's coinbase transaction) of them, and then pop from the front until empty. This is a reorg
 * in which all of the non-coinbase transactions in the disconnected chain also exist in the new
 * chain. */
static void AddAndRemoveDisconnectedBlockTransactionsAll(benchmark::Bench& bench)
{
    const auto chains{CreateBlocks(/*num_not_shared=*/1)};
    assert(chains.num_shared == BLOCK_VTX_COUNT - 1);

    bench.minEpochIterations(10).run([&]() {
        Reorg(chains);
    });
}

/** Add transactions from DisconnectedBlockTransactions, remove 90% of them, and then pop from the front until empty. */
static void AddAndRemoveDisconnectedBlockTransactions90(benchmark::Bench& bench)
{
    const auto chains{CreateBlocks(/*num_not_shared=*/BLOCK_VTX_COUNT_10PERCENT)};
    assert(chains.num_shared == BLOCK_VTX_COUNT - BLOCK_VTX_COUNT_10PERCENT);

    bench.minEpochIterations(10).run([&]() {
        Reorg(chains);
    });
}

/** Add transactions from DisconnectedBlockTransactions, remove 10% of them, and then pop from the front until empty. */
static void AddAndRemoveDisconnectedBlockTransactions10(benchmark::Bench& bench)
{
    const auto chains{CreateBlocks(/*num_not_shared=*/BLOCK_VTX_COUNT - BLOCK_VTX_COUNT_10PERCENT)};
    assert(chains.num_shared == BLOCK_VTX_COUNT_10PERCENT);

    bench.minEpochIterations(10).run([&]() {
        Reorg(chains);
    });
}

BENCHMARK(AddAndRemoveDisconnectedBlockTransactionsAll, benchmark::PriorityLevel::HIGH);
BENCHMARK(AddAndRemoveDisconnectedBlockTransactions90, benchmark::PriorityLevel::HIGH);
BENCHMARK(AddAndRemoveDisconnectedBlockTransactions10, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/merkle_root.cpp
// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <consensus/merkle.h> #include <random.h> #include <uint256.h> static void MerkleRoot(benchmark::Bench& bench) { FastRandomContext rng(true); std::vector<uint256> leaves; leaves.resize(9001); for (auto& item : leaves) { item = rng.rand256(); } bench.batch(leaves.size()).unit("leaf").run([&] { bool mutation = false; uint256 hash = ComputeMerkleRoot(std::vector<uint256>(leaves), &mutation); leaves[mutation] = hash; }); } BENCHMARK(MerkleRoot, benchmark::PriorityLevel::HIGH);
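// Not the ComputeMerkleRoot() implementation benchmarked above, but a minimal
// sketch (hypothetical NaiveMerkleRoot) of the classic Bitcoin merkle
// algorithm it implements: double-SHA256 each pair of nodes, duplicating the
// last node when a level has an odd count, until one hash remains. That
// duplication is exactly the ambiguity the `mutation` out-parameter above
// detects (CVE-2012-2459).
#include <hash.h>
#include <uint256.h>
#include <vector>

uint256 NaiveMerkleRoot(std::vector<uint256> hashes)
{
    if (hashes.empty()) return uint256();
    while (hashes.size() > 1) {
        if (hashes.size() % 2 != 0) hashes.push_back(hashes.back()); // duplicate the odd tail
        std::vector<uint256> parents;
        parents.reserve(hashes.size() / 2);
        for (size_t i = 0; i < hashes.size(); i += 2) {
            parents.push_back(Hash(hashes[i], hashes[i + 1])); // double-SHA256 of the pair
        }
        hashes = std::move(parents);
    }
    return hashes[0];
}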
0
bitcoin/src
bitcoin/src/bench/wallet_create_tx.cpp
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php.

#include <bench/bench.h>
#include <chainparams.h>
#include <wallet/coincontrol.h>
#include <consensus/merkle.h>
#include <kernel/chain.h>
#include <node/context.h>
#include <test/util/setup_common.h>
#include <validation.h>
#include <wallet/spend.h>
#include <wallet/test/util.h>
#include <wallet/wallet.h>

using wallet::CWallet;
using wallet::CreateMockableWalletDatabase;
using wallet::WALLET_FLAG_DESCRIPTORS;

struct TipBlock {
    uint256 prev_block_hash;
    int64_t prev_block_time;
    int tip_height;
};

TipBlock getTip(const CChainParams& params, const node::NodeContext& context)
{
    auto tip = WITH_LOCK(::cs_main, return context.chainman->ActiveTip());
    return (tip) ? TipBlock{tip->GetBlockHash(), tip->GetBlockTime(), tip->nHeight} :
                   TipBlock{params.GenesisBlock().GetHash(), params.GenesisBlock().GetBlockTime(), 0};
}

void generateFakeBlock(const CChainParams& params, const node::NodeContext& context, CWallet& wallet, const CScript& coinbase_out_script)
{
    TipBlock tip{getTip(params, context)};

    // Create block
    CBlock block;
    CMutableTransaction coinbase_tx;
    coinbase_tx.vin.resize(1);
    coinbase_tx.vin[0].prevout.SetNull();
    coinbase_tx.vout.resize(2);
    coinbase_tx.vout[0].scriptPubKey = coinbase_out_script;
    coinbase_tx.vout[0].nValue = 49 * COIN;
    coinbase_tx.vin[0].scriptSig = CScript() << ++tip.tip_height << OP_0;
    coinbase_tx.vout[1].scriptPubKey = coinbase_out_script; // extra output
    coinbase_tx.vout[1].nValue = 1 * COIN;
    block.vtx = {MakeTransactionRef(std::move(coinbase_tx))};

    block.nVersion = VERSIONBITS_LAST_OLD_BLOCK_VERSION;
    block.hashPrevBlock = tip.prev_block_hash;
    block.hashMerkleRoot = BlockMerkleRoot(block);
    block.nTime = ++tip.prev_block_time;
    block.nBits = params.GenesisBlock().nBits;
    block.nNonce = 0;

    {
        LOCK(::cs_main);
        // Add it to the index
        CBlockIndex* pindex{context.chainman->m_blockman.AddToBlockIndex(block, context.chainman->m_best_header)};
        // add it to the chain
        context.chainman->ActiveChain().SetTip(*pindex);
    }

    // notify wallet
    const auto& pindex = WITH_LOCK(::cs_main, return context.chainman->ActiveChain().Tip());
    wallet.blockConnected(ChainstateRole::NORMAL, kernel::MakeBlockInfo(pindex, &block));
}

struct PreSelectInputs {
    // How many coins from the wallet the process should select
    int num_of_internal_inputs;
    // future: this could have external inputs as well.
};

static void WalletCreateTx(benchmark::Bench& bench, const OutputType output_type, bool allow_other_inputs, std::optional<PreSelectInputs> preset_inputs)
{
    const auto test_setup = MakeNoLogFileContext<const TestingSetup>();

    // Set clock to genesis block, so the descriptors/keys creation time doesn't interfere with the blocks scanning process.
    SetMockTime(test_setup->m_node.chainman->GetParams().GenesisBlock().nTime);
    CWallet wallet{test_setup->m_node.chain.get(), "", CreateMockableWalletDatabase()};
    {
        LOCK(wallet.cs_wallet);
        wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
        wallet.SetupDescriptorScriptPubKeyMans();
    }

    // Generate destinations
    const auto dest{getNewDestination(wallet, output_type)};

    // Generate chain; each coinbase will have two outputs to fill up the wallet
    const auto& params = Params();
    const CScript coinbase_out{GetScriptForDestination(dest)};
    unsigned int chain_size = 5000; // 5k blocks means 10k UTXO for the wallet (minus 200 due to COINBASE_MATURITY)
    for (unsigned int i = 0; i < chain_size; ++i) {
        generateFakeBlock(params, test_setup->m_node, wallet, coinbase_out);
    }

    // Check available balance
    auto bal = WITH_LOCK(wallet.cs_wallet, return wallet::AvailableCoins(wallet).GetTotalAmount()); // Cache
    assert(bal == 50 * COIN * (chain_size - COINBASE_MATURITY));

    wallet::CCoinControl coin_control;
    coin_control.m_allow_other_inputs = allow_other_inputs;
    CAmount target = 0;
    if (preset_inputs) {
        // Select inputs, each has 49 BTC
        wallet::CoinFilterParams filter_coins;
        filter_coins.max_count = preset_inputs->num_of_internal_inputs;
        const auto& res = WITH_LOCK(wallet.cs_wallet,
                                    return wallet::AvailableCoins(wallet, /*coinControl=*/nullptr, /*feerate=*/std::nullopt, filter_coins));
        for (int i = 0; i < preset_inputs->num_of_internal_inputs; i++) {
            const auto& coin{res.coins.at(output_type)[i]};
            target += coin.txout.nValue;
            coin_control.Select(coin.outpoint);
        }
    }

    // If automatic coin selection is enabled, add the value of another UTXO to the target
    if (coin_control.m_allow_other_inputs) target += 50 * COIN;
    std::vector<wallet::CRecipient> recipients = {{dest, target, true}};

    bench.epochIterations(5).run([&] {
        LOCK(wallet.cs_wallet);
        const auto& tx_res = CreateTransaction(wallet, recipients, /*change_pos=*/std::nullopt, coin_control);
        assert(tx_res);
    });
}

static void AvailableCoins(benchmark::Bench& bench, const std::vector<OutputType>& output_type)
{
    const auto test_setup = MakeNoLogFileContext<const TestingSetup>();

    // Set clock to genesis block, so the descriptors/keys creation time doesn't interfere with the blocks scanning process.
    SetMockTime(test_setup->m_node.chainman->GetParams().GenesisBlock().nTime);
    CWallet wallet{test_setup->m_node.chain.get(), "", CreateMockableWalletDatabase()};
    {
        LOCK(wallet.cs_wallet);
        wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
        wallet.SetupDescriptorScriptPubKeyMans();
    }

    // Generate destinations
    std::vector<CScript> dest_wallet;
    dest_wallet.reserve(output_type.size());
    for (auto type : output_type) {
        dest_wallet.emplace_back(GetScriptForDestination(getNewDestination(wallet, type)));
    }

    // Generate chain; each coinbase will have two outputs to fill up the wallet
    const auto& params = Params();
    unsigned int chain_size = 1000;
    for (unsigned int i = 0; i < chain_size / dest_wallet.size(); ++i) {
        for (const auto& dest : dest_wallet) {
            generateFakeBlock(params, test_setup->m_node, wallet, dest);
        }
    }

    // Check available balance
    auto bal = WITH_LOCK(wallet.cs_wallet, return wallet::AvailableCoins(wallet).GetTotalAmount()); // Cache
    assert(bal == 50 * COIN * (chain_size - COINBASE_MATURITY));

    bench.epochIterations(2).run([&] {
        LOCK(wallet.cs_wallet);
        const auto& res = wallet::AvailableCoins(wallet);
        assert(res.All().size() == (chain_size - COINBASE_MATURITY) * 2);
    });
}

static void WalletCreateTxUseOnlyPresetInputs(benchmark::Bench& bench) { WalletCreateTx(bench, OutputType::BECH32, /*allow_other_inputs=*/false, {{/*num_of_internal_inputs=*/4}}); }

static void WalletCreateTxUsePresetInputsAndCoinSelection(benchmark::Bench& bench) { WalletCreateTx(bench, OutputType::BECH32, /*allow_other_inputs=*/true, {{/*num_of_internal_inputs=*/4}}); }

static void WalletAvailableCoins(benchmark::Bench& bench) { AvailableCoins(bench, {OutputType::BECH32M}); }

BENCHMARK(WalletCreateTxUseOnlyPresetInputs, benchmark::PriorityLevel::LOW)
BENCHMARK(WalletCreateTxUsePresetInputsAndCoinSelection, benchmark::PriorityLevel::LOW)
BENCHMARK(WalletAvailableCoins, benchmark::PriorityLevel::LOW);
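// The target arithmetic in WalletCreateTx above, spelled out (a reading of the
// setup, relying on the "each has 49 BTC" comment): with
// num_of_internal_inputs=4 the preselected inputs sum to 4 * 49 = 196 BTC,
// and when allow_other_inputs is set another 50 BTC is added, so coin
// selection must pull at least one extra wallet UTXO on top of the
// preselected ones.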
0
bitcoin/src
bitcoin/src/bench/logging.cpp
// Copyright (c) 2020-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <logging.h> #include <test/util/setup_common.h> #include <util/chaintype.h> // All but 2 of the benchmarks should have roughly similar performance: // // LogPrintWithoutCategory should be ~3 orders of magnitude faster, as nothing is logged. // // LogWithoutWriteToFile should be ~2 orders of magnitude faster, as it avoids disk writes. static void Logging(benchmark::Bench& bench, const std::vector<const char*>& extra_args, const std::function<void()>& log) { // Reset any enabled logging categories from a previous benchmark run. LogInstance().DisableCategory(BCLog::LogFlags::ALL); TestingSetup test_setup{ ChainType::REGTEST, extra_args, }; bench.run([&] { log(); }); } static void LogPrintLevelWithThreadNames(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=1", "-debug=net"}, [] { LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", "test"); }); } static void LogPrintLevelWithoutThreadNames(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=0", "-debug=net"}, [] { LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", "test"); }); } static void LogPrintWithCategory(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=0", "-debug=net"}, [] { LogPrint(BCLog::NET, "%s\n", "test"); }); } static void LogPrintWithoutCategory(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=0", "-debug=0"}, [] { LogPrint(BCLog::NET, "%s\n", "test"); }); } static void LogPrintfCategoryWithThreadNames(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=1", "-debug=net"}, [] { LogPrintfCategory(BCLog::NET, "%s\n", "test"); }); } static void LogPrintfCategoryWithoutThreadNames(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=0", "-debug=net"}, [] { LogPrintfCategory(BCLog::NET, "%s\n", "test"); }); } static void LogPrintfWithThreadNames(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=1"}, [] { LogPrintf("%s\n", "test"); }); } static void LogPrintfWithoutThreadNames(benchmark::Bench& bench) { Logging(bench, {"-logthreadnames=0"}, [] { LogPrintf("%s\n", "test"); }); } static void LogWithoutWriteToFile(benchmark::Bench& bench) { // Disable writing the log to a file, as used for unit tests and fuzzing in `MakeNoLogFileContext`. Logging(bench, {"-nodebuglogfile", "-debug=1"}, [] { LogPrintf("%s\n", "test"); LogPrint(BCLog::NET, "%s\n", "test"); }); } BENCHMARK(LogPrintLevelWithThreadNames, benchmark::PriorityLevel::HIGH); BENCHMARK(LogPrintLevelWithoutThreadNames, benchmark::PriorityLevel::HIGH); BENCHMARK(LogPrintWithCategory, benchmark::PriorityLevel::HIGH); BENCHMARK(LogPrintWithoutCategory, benchmark::PriorityLevel::HIGH); BENCHMARK(LogPrintfCategoryWithThreadNames, benchmark::PriorityLevel::HIGH); BENCHMARK(LogPrintfCategoryWithoutThreadNames, benchmark::PriorityLevel::HIGH); BENCHMARK(LogPrintfWithThreadNames, benchmark::PriorityLevel::HIGH); BENCHMARK(LogPrintfWithoutThreadNames, benchmark::PriorityLevel::HIGH); BENCHMARK(LogWithoutWriteToFile, benchmark::PriorityLevel::HIGH);
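// A sketch of how a further variant could slot into the Logging() harness
// above (LogPrintfCategoryValidation is hypothetical, shown only to
// illustrate the pattern: pick the -debug/-logthreadnames arguments, then
// pass the log statement as a lambda):
static void LogPrintfCategoryValidation(benchmark::Bench& bench)
{
    Logging(bench, {"-logthreadnames=0", "-debug=validation"}, [] { LogPrintfCategory(BCLog::VALIDATION, "%s\n", "test"); });
}
BENCHMARK(LogPrintfCategoryValidation, benchmark::PriorityLevel::HIGH);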
0
bitcoin/src
bitcoin/src/bench/duplicate_inputs.cpp
// Copyright (c) 2011-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <chainparams.h> #include <consensus/merkle.h> #include <consensus/validation.h> #include <pow.h> #include <random.h> #include <test/util/setup_common.h> #include <txmempool.h> #include <validation.h> static void DuplicateInputs(benchmark::Bench& bench) { const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(); const CScript SCRIPT_PUB{CScript(OP_TRUE)}; const CChainParams& chainparams = Params(); CBlock block{}; CMutableTransaction coinbaseTx{}; CMutableTransaction naughtyTx{}; LOCK(cs_main); CBlockIndex* pindexPrev = testing_setup->m_node.chainman->ActiveChain().Tip(); assert(pindexPrev != nullptr); block.nBits = GetNextWorkRequired(pindexPrev, &block, chainparams.GetConsensus()); block.nNonce = 0; auto nHeight = pindexPrev->nHeight + 1; // Make a coinbase TX coinbaseTx.vin.resize(1); coinbaseTx.vin[0].prevout.SetNull(); coinbaseTx.vout.resize(1); coinbaseTx.vout[0].scriptPubKey = SCRIPT_PUB; coinbaseTx.vout[0].nValue = GetBlockSubsidy(nHeight, chainparams.GetConsensus()); coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0; naughtyTx.vout.resize(1); naughtyTx.vout[0].nValue = 0; naughtyTx.vout[0].scriptPubKey = SCRIPT_PUB; uint64_t n_inputs = (((MAX_BLOCK_SERIALIZED_SIZE / WITNESS_SCALE_FACTOR) - (CTransaction(coinbaseTx).GetTotalSize() + CTransaction(naughtyTx).GetTotalSize())) / 41) - 100; for (uint64_t x = 0; x < (n_inputs - 1); ++x) { naughtyTx.vin.emplace_back(Txid::FromUint256(GetRandHash()), 0, CScript(), 0); } naughtyTx.vin.emplace_back(naughtyTx.vin.back()); block.vtx.push_back(MakeTransactionRef(std::move(coinbaseTx))); block.vtx.push_back(MakeTransactionRef(std::move(naughtyTx))); block.hashMerkleRoot = BlockMerkleRoot(block); bench.run([&] { BlockValidationState cvstate{}; assert(!CheckBlock(block, cvstate, chainparams.GetConsensus(), false, false)); assert(cvstate.GetRejectReason() == "bad-txns-inputs-duplicate"); }); } BENCHMARK(DuplicateInputs, benchmark::PriorityLevel::HIGH);
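// The n_inputs arithmetic above, spelled out (a reading of the code; the 41
// bytes are the serialized size of a minimal CTxIn: 32-byte prevout hash +
// 4-byte prevout index + 1-byte empty scriptSig length + 4-byte nSequence):
//
//   n_inputs = (MAX_BLOCK_SERIALIZED_SIZE / WITNESS_SCALE_FACTOR
//               - coinbase_size - naughty_base_size) / 41 - 100
//
// The trailing "- 100" leaves slack so the duplicated-input block stays under
// the serialized size limit and fails CheckBlock() only for the duplicate.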
0
bitcoin/src
bitcoin/src/bench/base58.cpp
// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <base58.h> #include <array> #include <vector> static void Base58Encode(benchmark::Bench& bench) { static const std::array<unsigned char, 32> buff = { { 17, 79, 8, 99, 150, 189, 208, 162, 22, 23, 203, 163, 36, 58, 147, 227, 139, 2, 215, 100, 91, 38, 11, 141, 253, 40, 117, 21, 16, 90, 200, 24 } }; bench.batch(buff.size()).unit("byte").run([&] { EncodeBase58(buff); }); } static void Base58CheckEncode(benchmark::Bench& bench) { static const std::array<unsigned char, 32> buff = { { 17, 79, 8, 99, 150, 189, 208, 162, 22, 23, 203, 163, 36, 58, 147, 227, 139, 2, 215, 100, 91, 38, 11, 141, 253, 40, 117, 21, 16, 90, 200, 24 } }; bench.batch(buff.size()).unit("byte").run([&] { EncodeBase58Check(buff); }); } static void Base58Decode(benchmark::Bench& bench) { const char* addr = "17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"; std::vector<unsigned char> vch; bench.batch(strlen(addr)).unit("byte").run([&] { (void) DecodeBase58(addr, vch, 64); }); } BENCHMARK(Base58Encode, benchmark::PriorityLevel::HIGH); BENCHMARK(Base58CheckEncode, benchmark::PriorityLevel::HIGH); BENCHMARK(Base58Decode, benchmark::PriorityLevel::HIGH);
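// Not the EncodeBase58() implementation benchmarked above, but a minimal
// sketch (hypothetical NaiveBase58Encode) of the big-integer base conversion
// it performs: interpret the bytes as a big-endian number, repeatedly divide
// by 58, and map leading zero bytes to leading '1' characters. Quadratic in
// the input size, which is why the benchmark reports per-byte cost.
#include <string>
#include <vector>

std::string NaiveBase58Encode(const std::vector<unsigned char>& input)
{
    static const char* kAlphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
    // Leading zero bytes carry no numeric weight; each maps to the zero digit '1'.
    size_t zeroes = 0;
    while (zeroes < input.size() && input[zeroes] == 0) ++zeroes;
    // digits holds the base-58 representation, most significant digit first.
    std::vector<unsigned char> digits;
    for (size_t i = zeroes; i < input.size(); ++i) {
        unsigned int carry = input[i]; // digits = digits * 256 + input[i]
        for (auto it = digits.rbegin(); it != digits.rend(); ++it) {
            carry += 256 * *it;
            *it = carry % 58;
            carry /= 58;
        }
        while (carry) {
            digits.insert(digits.begin(), carry % 58);
            carry /= 58;
        }
    }
    std::string result(zeroes, '1');
    for (unsigned char d : digits) result += kAlphabet[d];
    return result;
}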
0
bitcoin/src
bitcoin/src/bench/coin_selection.cpp
// Copyright (c) 2012-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <bench/bench.h>
#include <interfaces/chain.h>
#include <node/context.h>
#include <policy/policy.h>
#include <wallet/coinselection.h>
#include <wallet/spend.h>
#include <wallet/wallet.h>
#include <wallet/test/util.h>

#include <set>

using node::NodeContext;
using wallet::AttemptSelection;
using wallet::CHANGE_LOWER;
using wallet::COutput;
using wallet::CWallet;
using wallet::CWalletTx;
using wallet::CoinEligibilityFilter;
using wallet::CoinSelectionParams;
using wallet::CreateMockableWalletDatabase;
using wallet::OutputGroup;
using wallet::SelectCoinsBnB;
using wallet::TxStateInactive;

static void addCoin(const CAmount& nValue, const CWallet& wallet, std::vector<std::unique_ptr<CWalletTx>>& wtxs)
{
    static int nextLockTime = 0;
    CMutableTransaction tx;
    tx.nLockTime = nextLockTime++; // so all transactions get different hashes
    tx.vout.resize(1);
    tx.vout[0].nValue = nValue;
    wtxs.push_back(std::make_unique<CWalletTx>(MakeTransactionRef(std::move(tx)), TxStateInactive{}));
}

// Simple benchmark for wallet coin selection. Note that it may be necessary
// to build up more complicated scenarios in order to get meaningful
// measurements of performance. From laanwj, "Wallet coin selection is probably
// the hardest, as you need a wider selection of scenarios, just testing the
// same one over and over isn't too useful. Generating random isn't useful
// either for measurements."
// (https://github.com/bitcoin/bitcoin/issues/7883#issuecomment-224807484)
static void CoinSelection(benchmark::Bench& bench)
{
    NodeContext node;
    auto chain = interfaces::MakeChain(node);
    CWallet wallet(chain.get(), "", CreateMockableWalletDatabase());
    std::vector<std::unique_ptr<CWalletTx>> wtxs;
    LOCK(wallet.cs_wallet);

    // Add coins.
    for (int i = 0; i < 1000; ++i) {
        addCoin(1000 * COIN, wallet, wtxs);
    }
    addCoin(3 * COIN, wallet, wtxs);

    // Create coins
    wallet::CoinsResult available_coins;
    for (const auto& wtx : wtxs) {
        const auto txout = wtx->tx->vout.at(0);
        available_coins.coins[OutputType::BECH32].emplace_back(COutPoint(wtx->GetHash(), 0), txout, /*depth=*/6 * 24, CalculateMaximumSignedInputSize(txout, &wallet, /*coin_control=*/nullptr), /*spendable=*/true, /*solvable=*/true, /*safe=*/true, wtx->GetTxTime(), /*from_me=*/true, /*fees=*/ 0);
    }

    const CoinEligibilityFilter filter_standard(1, 6, 0);
    FastRandomContext rand{};
    const CoinSelectionParams coin_selection_params{
        rand,
        /*change_output_size=*/ 34,
        /*change_spend_size=*/ 148,
        /*min_change_target=*/ CHANGE_LOWER,
        /*effective_feerate=*/ CFeeRate(0),
        /*long_term_feerate=*/ CFeeRate(0),
        /*discard_feerate=*/ CFeeRate(0),
        /*tx_noinputs_size=*/ 0,
        /*avoid_partial=*/ false,
    };
    auto group = wallet::GroupOutputs(wallet, available_coins, coin_selection_params, {{filter_standard}})[filter_standard];
    bench.run([&] {
        auto result = AttemptSelection(wallet.chain(), 1003 * COIN, group, coin_selection_params, /*allow_mixed_output_types=*/true);
        assert(result);
        assert(result->GetSelectedValue() == 1003 * COIN);
        assert(result->GetInputSet().size() == 2);
    });
}

// Copied from src/wallet/test/coinselector_tests.cpp
static void add_coin(const CAmount& nValue, int nInput, std::vector<OutputGroup>& set)
{
    CMutableTransaction tx;
    tx.vout.resize(nInput + 1);
    tx.vout[nInput].nValue = nValue;
    COutput output(COutPoint(tx.GetHash(), nInput), tx.vout.at(nInput), /*depth=*/ 0, /*input_bytes=*/ -1, /*spendable=*/ true, /*solvable=*/ true, /*safe=*/ true, /*time=*/ 0, /*from_me=*/ true, /*fees=*/ 0);
    set.emplace_back();
    set.back().Insert(std::make_shared<COutput>(output), /*ancestors=*/ 0, /*descendants=*/ 0);
}

// Copied from src/wallet/test/coinselector_tests.cpp
static CAmount make_hard_case(int utxos, std::vector<OutputGroup>& utxo_pool)
{
    utxo_pool.clear();
    CAmount target = 0;
    for (int i = 0; i < utxos; ++i) {
        target += CAmount{1} << (utxos+i);
        add_coin(CAmount{1} << (utxos+i), 2*i, utxo_pool);
        add_coin((CAmount{1} << (utxos+i)) + (CAmount{1} << (utxos-1-i)), 2*i + 1, utxo_pool);
    }
    return target;
}

static void BnBExhaustion(benchmark::Bench& bench)
{
    // Setup
    std::vector<OutputGroup> utxo_pool;

    bench.run([&] {
        // Benchmark
        CAmount target = make_hard_case(17, utxo_pool);
        SelectCoinsBnB(utxo_pool, target, 0, MAX_STANDARD_TX_WEIGHT); // Should exhaust

        // Cleanup
        utxo_pool.clear();
    });
}

BENCHMARK(CoinSelection, benchmark::PriorityLevel::HIGH);
BENCHMARK(BnBExhaustion, benchmark::PriorityLevel::HIGH);
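// A rough reading of make_hard_case() above (my gloss, not a claim from the
// original authors): the target is the sum of the "plain" coins 2^(utxos+i),
// and each plain coin has a slightly larger twin at
// 2^(utxos+i) + 2^(utxos-1-i). An exact solution exists (take every plain
// coin), but swapping any subset of twins in produces a distinct near-miss
// just above the target, so the depth-first search wades through a
// near-exponential number of close candidates and is expected to hit
// SelectCoinsBnB's internal iteration limit first; hence "Should exhaust".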
0
bitcoin/src
bitcoin/src/bench/data.h
// Copyright (c) 2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_BENCH_DATA_H #define BITCOIN_BENCH_DATA_H #include <cstdint> #include <vector> namespace benchmark { namespace data { extern const std::vector<uint8_t> block413567; } // namespace data } // namespace benchmark #endif // BITCOIN_BENCH_DATA_H
0
bitcoin/src
bitcoin/src/bench/block_assemble.cpp
// Copyright (c) 2011-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <consensus/validation.h> #include <crypto/sha256.h> #include <node/miner.h> #include <random.h> #include <test/util/mining.h> #include <test/util/script.h> #include <test/util/setup_common.h> #include <txmempool.h> #include <validation.h> #include <vector> static void AssembleBlock(benchmark::Bench& bench) { const auto test_setup = MakeNoLogFileContext<const TestingSetup>(); CScriptWitness witness; witness.stack.push_back(WITNESS_STACK_ELEM_OP_TRUE); // Collect some loose transactions that spend the coinbases of our mined blocks constexpr size_t NUM_BLOCKS{200}; std::array<CTransactionRef, NUM_BLOCKS - COINBASE_MATURITY + 1> txs; for (size_t b{0}; b < NUM_BLOCKS; ++b) { CMutableTransaction tx; tx.vin.emplace_back(MineBlock(test_setup->m_node, P2WSH_OP_TRUE)); tx.vin.back().scriptWitness = witness; tx.vout.emplace_back(1337, P2WSH_OP_TRUE); if (NUM_BLOCKS - b >= COINBASE_MATURITY) txs.at(b) = MakeTransactionRef(tx); } { LOCK(::cs_main); for (const auto& txr : txs) { const MempoolAcceptResult res = test_setup->m_node.chainman->ProcessTransaction(txr); assert(res.m_result_type == MempoolAcceptResult::ResultType::VALID); } } bench.run([&] { PrepareBlock(test_setup->m_node, P2WSH_OP_TRUE); }); } static void BlockAssemblerAddPackageTxns(benchmark::Bench& bench) { FastRandomContext det_rand{true}; auto testing_setup{MakeNoLogFileContext<TestChain100Setup>()}; testing_setup->PopulateMempool(det_rand, /*num_transactions=*/1000, /*submit=*/true); node::BlockAssembler::Options assembler_options; assembler_options.test_block_validity = false; bench.run([&] { PrepareBlock(testing_setup->m_node, P2WSH_OP_TRUE, assembler_options); }); } BENCHMARK(AssembleBlock, benchmark::PriorityLevel::HIGH); BENCHMARK(BlockAssemblerAddPackageTxns, benchmark::PriorityLevel::LOW);
0
bitcoin/src
bitcoin/src/bench/bench.h
// Copyright (c) 2015-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_BENCH_BENCH_H #define BITCOIN_BENCH_BENCH_H #include <util/fs.h> #include <util/macros.h> #include <chrono> #include <functional> #include <map> #include <string> #include <vector> #include <bench/nanobench.h> // IWYU pragma: export /* * Usage: static void NameOfYourBenchmarkFunction(benchmark::Bench& bench) { ...do any setup needed... bench.run([&] { ...do stuff you want to time; refer to src/bench/nanobench.h for more information and the options that can be passed here... }); ...do any cleanup needed... } BENCHMARK(NameOfYourBenchmarkFunction); */ namespace benchmark { using ankerl::nanobench::Bench; typedef std::function<void(Bench&)> BenchFunction; enum PriorityLevel : uint8_t { LOW = 1 << 0, HIGH = 1 << 2, }; // List priority labels, comma-separated and sorted by increasing priority std::string ListPriorities(); uint8_t StringToPriority(const std::string& str); struct Args { bool is_list_only; bool sanity_check; std::chrono::milliseconds min_time; std::vector<double> asymptote; fs::path output_csv; fs::path output_json; std::string regex_filter; uint8_t priority; }; class BenchRunner { // maps from "name" -> (function, priority_level) typedef std::map<std::string, std::pair<BenchFunction, PriorityLevel>> BenchmarkMap; static BenchmarkMap& benchmarks(); public: BenchRunner(std::string name, BenchFunction func, PriorityLevel level); static void RunAll(const Args& args); }; } // namespace benchmark // BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo, priority_level); #define BENCHMARK(n, priority_level) \ benchmark::BenchRunner PASTE2(bench_, PASTE2(__LINE__, n))(STRINGIZE(n), n, priority_level); #endif // BITCOIN_BENCH_BENCH_H
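// The usage comment above, filled in (SumVector is a hypothetical benchmark
// for illustration only; a real one would live in its own src/bench/*.cpp):
#include <bench/bench.h>

#include <numeric>
#include <vector>

static void SumVector(benchmark::Bench& bench)
{
    std::vector<int> v(1000, 1); // setup runs once, outside the timed loop
    bench.run([&] {
        // the timed body; keep the result observable so it is not optimized away
        ankerl::nanobench::doNotOptimizeAway(std::accumulate(v.begin(), v.end(), 0));
    });
}
BENCHMARK(SumVector, benchmark::PriorityLevel::LOW);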
0
bitcoin/src
bitcoin/src/bench/util_time.cpp
// Copyright (c) 2019-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <util/time.h> static void BenchTimeDeprecated(benchmark::Bench& bench) { bench.run([&] { (void)GetTime(); }); } static void BenchTimeMock(benchmark::Bench& bench) { SetMockTime(111); bench.run([&] { (void)GetTime<std::chrono::seconds>(); }); SetMockTime(0); } static void BenchTimeMillis(benchmark::Bench& bench) { bench.run([&] { (void)GetTime<std::chrono::milliseconds>(); }); } static void BenchTimeMillisSys(benchmark::Bench& bench) { bench.run([&] { (void)TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()); }); } BENCHMARK(BenchTimeDeprecated, benchmark::PriorityLevel::HIGH); BENCHMARK(BenchTimeMillis, benchmark::PriorityLevel::HIGH); BENCHMARK(BenchTimeMillisSys, benchmark::PriorityLevel::HIGH); BENCHMARK(BenchTimeMock, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/bip324_ecdh.cpp
// Copyright (c) 2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <key.h> #include <pubkey.h> #include <random.h> #include <span.h> #include <array> #include <cstddef> static void BIP324_ECDH(benchmark::Bench& bench) { ECC_Start(); FastRandomContext rng; std::array<std::byte, 32> key_data; std::array<std::byte, EllSwiftPubKey::size()> our_ellswift_data; std::array<std::byte, EllSwiftPubKey::size()> their_ellswift_data; rng.fillrand(key_data); rng.fillrand(our_ellswift_data); rng.fillrand(their_ellswift_data); bench.batch(1).unit("ecdh").run([&] { CKey key; key.Set(UCharCast(key_data.data()), UCharCast(key_data.data()) + 32, true); EllSwiftPubKey our_ellswift(our_ellswift_data); EllSwiftPubKey their_ellswift(their_ellswift_data); auto ret = key.ComputeBIP324ECDHSecret(their_ellswift, our_ellswift, true); // To make sure that the computation is not the same on every iteration (ellswift decoding // is variable-time), distribute bytes from the shared secret over the 3 inputs. The most // important one is their_ellswift, because that one is actually decoded, so it's given most // bytes. The data is copied into the middle, so that both halves are affected: // - Copy 8 bytes from the resulting shared secret into middle of the private key. std::copy(ret.begin(), ret.begin() + 8, key_data.begin() + 12); // - Copy 8 bytes from the resulting shared secret into the middle of our ellswift key. std::copy(ret.begin() + 8, ret.begin() + 16, our_ellswift_data.begin() + 28); // - Copy 16 bytes from the resulting shared secret into the middle of their ellswift key. std::copy(ret.begin() + 16, ret.end(), their_ellswift_data.begin() + 24); }); ECC_Stop(); } BENCHMARK(BIP324_ECDH, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/hashpadding.cpp
// Copyright (c) 2015-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <hash.h> #include <random.h> #include <uint256.h> static void PrePadded(benchmark::Bench& bench) { CSHA256 hasher; // Setup the salted hasher uint256 nonce = GetRandHash(); hasher.Write(nonce.begin(), 32); hasher.Write(nonce.begin(), 32); uint256 data = GetRandHash(); bench.run([&] { unsigned char out[32]; CSHA256 h = hasher; h.Write(data.begin(), 32); h.Finalize(out); }); } BENCHMARK(PrePadded, benchmark::PriorityLevel::HIGH); static void RegularPadded(benchmark::Bench& bench) { CSHA256 hasher; // Setup the salted hasher uint256 nonce = GetRandHash(); uint256 data = GetRandHash(); bench.run([&] { unsigned char out[32]; CSHA256 h = hasher; h.Write(nonce.begin(), 32); h.Write(data.begin(), 32); h.Finalize(out); }); } BENCHMARK(RegularPadded, benchmark::PriorityLevel::HIGH);
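// Why PrePadded wins, spelled out: SHA256 compresses 64-byte blocks, and the
// two 32-byte nonce writes in setup fill exactly one block, so `hasher`
// stores a midstate with that compression already done. Each iteration then
// only processes the final block (32 bytes of data plus padding): one
// compression, versus two in RegularPadded, which hashes nonce and data from
// scratch every time.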
0
bitcoin/src
bitcoin/src/bench/rollingbloom.cpp
// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <common/bloom.h> #include <crypto/common.h> #include <vector> static void RollingBloom(benchmark::Bench& bench) { CRollingBloomFilter filter(120000, 0.000001); std::vector<unsigned char> data(32); uint32_t count = 0; bench.run([&] { count++; WriteLE32(data.data(), count); filter.insert(data); WriteBE32(data.data(), count); filter.contains(data); }); } static void RollingBloomReset(benchmark::Bench& bench) { CRollingBloomFilter filter(120000, 0.000001); bench.run([&] { filter.reset(); }); } BENCHMARK(RollingBloom, benchmark::PriorityLevel::HIGH); BENCHMARK(RollingBloomReset, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/load_external.cpp
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php.

#include <bench/bench.h>
#include <bench/data.h>
#include <chainparams.h>
#include <clientversion.h>
#include <test/util/setup_common.h>
#include <util/chaintype.h>
#include <validation.h>

/**
 * The LoadExternalBlockFile() function is used during -reindex and -loadblock.
 *
 * Create a test file that's similar to a datadir/blocks/blk?????.dat file.
 * It contains around 134 copies of the same block (typical size of real block files).
 * For each block in the file, LoadExternalBlockFile() won't find its parent,
 * and so will skip the block. (In the real system, it will re-read the block
 * from disk later when it encounters its parent.)
 *
 * This benchmark measures the performance of deserializing the block (or just
 * its header, beginning with PR 16981).
 */
static void LoadExternalBlockFile(benchmark::Bench& bench)
{
    const auto testing_setup{MakeNoLogFileContext<const TestingSetup>(ChainType::MAIN)};

    // Create a single block as in the blocks files (magic bytes, block size,
    // block data) as a stream object.
    const fs::path blkfile{testing_setup.get()->m_path_root / "blk.dat"};
    DataStream ss{};
    auto params{testing_setup->m_node.chainman->GetParams()};
    ss << params.MessageStart();
    ss << static_cast<uint32_t>(benchmark::data::block413567.size());
    // We can't use the streaming serialization (ss << benchmark::data::block413567)
    // because that first writes a compact size.
    ss << Span{benchmark::data::block413567};

    // Create the test file.
    {
        // "wb+" is "binary, O_RDWR | O_CREAT | O_TRUNC".
        FILE* file{fsbridge::fopen(blkfile, "wb+")};
        // Make the test block file about 128 MB in length.
        for (size_t i = 0; i < node::MAX_BLOCKFILE_SIZE / ss.size(); ++i) {
            if (fwrite(ss.data(), 1, ss.size(), file) != ss.size()) {
                throw std::runtime_error("write to test file failed\n");
            }
        }
        fclose(file);
    }

    std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
    FlatFilePos pos;
    bench.run([&] {
        // "rb" is "binary, O_RDONLY", positioned to the start of the file.
        // The file will be closed by LoadExternalBlockFile().
        AutoFile file{fsbridge::fopen(blkfile, "rb")};
        testing_setup->m_node.chainman->LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
    });
    fs::remove(blkfile);
}

BENCHMARK(LoadExternalBlockFile, benchmark::PriorityLevel::HIGH);
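// Each record written above follows the blk?????.dat on-disk layout, roughly:
//
//   [4 bytes] network magic (params.MessageStart())
//   [4 bytes] little-endian block size
//   [N bytes] raw serialized block
//
// hence the Span{} write: streaming the std::vector itself would serialize a
// CompactSize length prefix first, which is not part of the file format.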
0
bitcoin/src
bitcoin/src/bench/verify_script.cpp
// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <key.h> #if defined(HAVE_CONSENSUS_LIB) #include <script/bitcoinconsensus.h> #endif #include <script/script.h> #include <script/interpreter.h> #include <streams.h> #include <test/util/transaction_utils.h> #include <array> // Microbenchmark for verification of a basic P2WPKH script. Can be easily // modified to measure performance of other types of scripts. static void VerifyScriptBench(benchmark::Bench& bench) { ECC_Start(); const uint32_t flags{SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH}; const int witnessversion = 0; // Key pair. CKey key; static const std::array<unsigned char, 32> vchKey = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }; key.Set(vchKey.begin(), vchKey.end(), false); CPubKey pubkey = key.GetPubKey(); uint160 pubkeyHash; CHash160().Write(pubkey).Finalize(pubkeyHash); // Script. CScript scriptPubKey = CScript() << witnessversion << ToByteVector(pubkeyHash); CScript scriptSig; CScript witScriptPubkey = CScript() << OP_DUP << OP_HASH160 << ToByteVector(pubkeyHash) << OP_EQUALVERIFY << OP_CHECKSIG; const CMutableTransaction& txCredit = BuildCreditingTransaction(scriptPubKey, 1); CMutableTransaction txSpend = BuildSpendingTransaction(scriptSig, CScriptWitness(), CTransaction(txCredit)); CScriptWitness& witness = txSpend.vin[0].scriptWitness; witness.stack.emplace_back(); key.Sign(SignatureHash(witScriptPubkey, txSpend, 0, SIGHASH_ALL, txCredit.vout[0].nValue, SigVersion::WITNESS_V0), witness.stack.back()); witness.stack.back().push_back(static_cast<unsigned char>(SIGHASH_ALL)); witness.stack.push_back(ToByteVector(pubkey)); // Benchmark. bench.run([&] { ScriptError err; bool success = VerifyScript( txSpend.vin[0].scriptSig, txCredit.vout[0].scriptPubKey, &txSpend.vin[0].scriptWitness, flags, MutableTransactionSignatureChecker(&txSpend, 0, txCredit.vout[0].nValue, MissingDataBehavior::ASSERT_FAIL), &err); assert(err == SCRIPT_ERR_OK); assert(success); #if defined(HAVE_CONSENSUS_LIB) DataStream stream; stream << TX_WITH_WITNESS(txSpend); int csuccess = bitcoinconsensus_verify_script_with_amount( txCredit.vout[0].scriptPubKey.data(), txCredit.vout[0].scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)stream.data(), stream.size(), 0, flags, nullptr); assert(csuccess == 1); #endif }); ECC_Stop(); } static void VerifyNestedIfScript(benchmark::Bench& bench) { std::vector<std::vector<unsigned char>> stack; CScript script; for (int i = 0; i < 100; ++i) { script << OP_1 << OP_IF; } for (int i = 0; i < 1000; ++i) { script << OP_1; } for (int i = 0; i < 100; ++i) { script << OP_ENDIF; } bench.run([&] { auto stack_copy = stack; ScriptError error; bool ret = EvalScript(stack_copy, script, 0, BaseSignatureChecker(), SigVersion::BASE, &error); assert(ret); }); } BENCHMARK(VerifyScriptBench, benchmark::PriorityLevel::HIGH); BENCHMARK(VerifyNestedIfScript, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/data.cpp
// Copyright (c) 2019-2021 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/data.h> namespace benchmark { namespace data { #include <bench/data/block413567.raw.h> const std::vector<uint8_t> block413567{std::begin(block413567_raw), std::end(block413567_raw)}; } // namespace data } // namespace benchmark
0
bitcoin/src
bitcoin/src/bench/wallet_balance.cpp
// Copyright (c) 2012-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <bench/bench.h>
#include <interfaces/chain.h>
#include <node/chainstate.h>
#include <node/context.h>
#include <test/util/mining.h>
#include <test/util/setup_common.h>
#include <wallet/test/util.h>
#include <validationinterface.h>
#include <wallet/receive.h>
#include <wallet/wallet.h>

#include <optional>

namespace wallet {
static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const bool add_mine)
{
    const auto test_setup = MakeNoLogFileContext<const TestingSetup>();

    const auto& ADDRESS_WATCHONLY = ADDRESS_BCRT1_UNSPENDABLE;

    // Set clock to genesis block, so the descriptors/keys creation time doesn't interfere with the blocks scanning process.
    // The reason is 'generatetoaddress', which creates a chain with deterministic timestamps in the past.
    SetMockTime(test_setup->m_node.chainman->GetParams().GenesisBlock().nTime);
    CWallet wallet{test_setup->m_node.chain.get(), "", CreateMockableWalletDatabase()};
    {
        LOCK(wallet.cs_wallet);
        wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS);
        wallet.SetupDescriptorScriptPubKeyMans();
    }

    auto handler = test_setup->m_node.chain->handleNotifications({&wallet, [](CWallet*) {}});

    const std::optional<std::string> address_mine{add_mine ? std::optional<std::string>{getnewaddress(wallet)} : std::nullopt};

    for (int i = 0; i < 100; ++i) {
        generatetoaddress(test_setup->m_node, address_mine.value_or(ADDRESS_WATCHONLY));
        generatetoaddress(test_setup->m_node, ADDRESS_WATCHONLY);
    }
    SyncWithValidationInterfaceQueue();

    auto bal = GetBalance(wallet); // Cache

    bench.run([&] {
        if (set_dirty) wallet.MarkDirty();
        bal = GetBalance(wallet);
        if (add_mine) assert(bal.m_mine_trusted > 0);
    });
}

static void WalletBalanceDirty(benchmark::Bench& bench) { WalletBalance(bench, /*set_dirty=*/true, /*add_mine=*/true); }
static void WalletBalanceClean(benchmark::Bench& bench) { WalletBalance(bench, /*set_dirty=*/false, /*add_mine=*/true); }
static void WalletBalanceMine(benchmark::Bench& bench) { WalletBalance(bench, /*set_dirty=*/false, /*add_mine=*/true); }
static void WalletBalanceWatch(benchmark::Bench& bench) { WalletBalance(bench, /*set_dirty=*/false, /*add_mine=*/false); }

BENCHMARK(WalletBalanceDirty, benchmark::PriorityLevel::HIGH);
BENCHMARK(WalletBalanceClean, benchmark::PriorityLevel::HIGH);
BENCHMARK(WalletBalanceMine, benchmark::PriorityLevel::HIGH);
BENCHMARK(WalletBalanceWatch, benchmark::PriorityLevel::HIGH);
} // namespace wallet
0
bitcoin/src
bitcoin/src/bench/lockedpool.cpp
// Copyright (c) 2016-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <support/lockedpool.h> #include <vector> #define ASIZE 2048 #define MSIZE 2048 static void BenchLockedPool(benchmark::Bench& bench) { void *synth_base = reinterpret_cast<void*>(0x08000000); const size_t synth_size = 1024*1024; Arena b(synth_base, synth_size, 16); std::vector<void*> addr{ASIZE, nullptr}; uint32_t s = 0x12345678; bench.run([&] { int idx = s & (addr.size() - 1); if (s & 0x80000000) { b.free(addr[idx]); addr[idx] = nullptr; } else if (!addr[idx]) { addr[idx] = b.alloc((s >> 16) & (MSIZE - 1)); } bool lsb = s & 1; s >>= 1; if (lsb) s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0 }); for (void *ptr: addr) b.free(ptr); addr.clear(); } BENCHMARK(BenchLockedPool, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/crypto_hash.cpp
// Copyright (c) 2016-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <bench/bench.h>
#include <crypto/muhash.h>
#include <crypto/ripemd160.h>
#include <crypto/sha1.h>
#include <crypto/sha256.h>
#include <crypto/sha3.h>
#include <crypto/sha512.h>
#include <crypto/siphash.h>
#include <hash.h>
#include <random.h>
#include <tinyformat.h>
#include <uint256.h>

/* Number of bytes to hash per iteration */
static const uint64_t BUFFER_SIZE = 1000*1000;

static void BenchRIPEMD160(benchmark::Bench& bench)
{
    uint8_t hash[CRIPEMD160::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CRIPEMD160().Write(in.data(), in.size()).Finalize(hash);
    });
}

static void SHA1(benchmark::Bench& bench)
{
    uint8_t hash[CSHA1::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA1().Write(in.data(), in.size()).Finalize(hash);
    });
}

static void SHA256_STANDARD(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::STANDARD)));
    uint8_t hash[CSHA256::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256().Write(in.data(), in.size()).Finalize(hash);
    });
    SHA256AutoDetect();
}

static void SHA256_SSE4(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4)));
    uint8_t hash[CSHA256::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256().Write(in.data(), in.size()).Finalize(hash);
    });
    SHA256AutoDetect();
}

static void SHA256_AVX2(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_AVX2)));
    uint8_t hash[CSHA256::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256().Write(in.data(), in.size()).Finalize(hash);
    });
    SHA256AutoDetect();
}

static void SHA256_SHANI(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_SHANI)));
    uint8_t hash[CSHA256::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256().Write(in.data(), in.size()).Finalize(hash);
    });
    SHA256AutoDetect();
}

static void SHA3_256_1M(benchmark::Bench& bench)
{
    uint8_t hash[SHA3_256::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        SHA3_256().Write(in).Finalize(hash);
    });
}

static void SHA256_32b_STANDARD(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::STANDARD)));
    std::vector<uint8_t> in(32,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256()
            .Write(in.data(), in.size())
            .Finalize(in.data());
    });
    SHA256AutoDetect();
}

static void SHA256_32b_SSE4(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4)));
    std::vector<uint8_t> in(32,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256()
            .Write(in.data(), in.size())
            .Finalize(in.data());
    });
    SHA256AutoDetect();
}

static void SHA256_32b_AVX2(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_AVX2)));
    std::vector<uint8_t> in(32,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256()
            .Write(in.data(), in.size())
            .Finalize(in.data());
    });
    SHA256AutoDetect();
}

static void SHA256_32b_SHANI(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_SHANI)));
    std::vector<uint8_t> in(32,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA256()
            .Write(in.data(), in.size())
            .Finalize(in.data());
    });
    SHA256AutoDetect();
}

static void SHA256D64_1024_STANDARD(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::STANDARD)));
    std::vector<uint8_t> in(64 * 1024, 0);
    bench.batch(in.size()).unit("byte").run([&] {
        SHA256D64(in.data(), in.data(), 1024);
    });
    SHA256AutoDetect();
}

static void SHA256D64_1024_SSE4(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4)));
    std::vector<uint8_t> in(64 * 1024, 0);
    bench.batch(in.size()).unit("byte").run([&] {
        SHA256D64(in.data(), in.data(), 1024);
    });
    SHA256AutoDetect();
}

static void SHA256D64_1024_AVX2(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_AVX2)));
    std::vector<uint8_t> in(64 * 1024, 0);
    bench.batch(in.size()).unit("byte").run([&] {
        SHA256D64(in.data(), in.data(), 1024);
    });
    SHA256AutoDetect();
}

static void SHA256D64_1024_SHANI(benchmark::Bench& bench)
{
    bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_SHANI)));
    std::vector<uint8_t> in(64 * 1024, 0);
    bench.batch(in.size()).unit("byte").run([&] {
        SHA256D64(in.data(), in.data(), 1024);
    });
    SHA256AutoDetect();
}

static void SHA512(benchmark::Bench& bench)
{
    uint8_t hash[CSHA512::OUTPUT_SIZE];
    std::vector<uint8_t> in(BUFFER_SIZE,0);
    bench.batch(in.size()).unit("byte").run([&] {
        CSHA512().Write(in.data(), in.size()).Finalize(hash);
    });
}

static void SipHash_32b(benchmark::Bench& bench)
{
    uint256 x;
    uint64_t k1 = 0;
    bench.run([&] {
        *((uint64_t*)x.begin()) = SipHashUint256(0, ++k1, x);
    });
}

static void FastRandom_32bit(benchmark::Bench& bench)
{
    FastRandomContext rng(true);
    bench.run([&] {
        rng.rand32();
    });
}

static void FastRandom_1bit(benchmark::Bench& bench)
{
    FastRandomContext rng(true);
    bench.run([&] {
        rng.randbool();
    });
}

static void MuHash(benchmark::Bench& bench)
{
    MuHash3072 acc;
    unsigned char key[32] = {0};
    uint32_t i = 0;
    bench.run([&] {
        key[0] = ++i & 0xFF;
        acc *= MuHash3072(key);
    });
}

static void MuHashMul(benchmark::Bench& bench)
{
    MuHash3072 acc;
    FastRandomContext rng(true);
    MuHash3072 muhash{rng.randbytes(32)};

    bench.run([&] {
        acc *= muhash;
    });
}

static void MuHashDiv(benchmark::Bench& bench)
{
    MuHash3072 acc;
    FastRandomContext rng(true);
    MuHash3072 muhash{rng.randbytes(32)};

    bench.run([&] {
        acc /= muhash;
    });
}

static void MuHashPrecompute(benchmark::Bench& bench)
{
    MuHash3072 acc;
    FastRandomContext rng(true);
    std::vector<unsigned char> key{rng.randbytes(32)};

    bench.run([&] {
        MuHash3072{key};
    });
}

BENCHMARK(BenchRIPEMD160, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA1, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_STANDARD, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_SSE4, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_AVX2, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_SHANI, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA512, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA3_256_1M, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_32b_STANDARD, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_32b_SSE4, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_32b_AVX2, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256_32b_SHANI, benchmark::PriorityLevel::HIGH);
BENCHMARK(SipHash_32b, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256D64_1024_STANDARD, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256D64_1024_SSE4, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256D64_1024_AVX2, benchmark::PriorityLevel::HIGH);
BENCHMARK(SHA256D64_1024_SHANI, benchmark::PriorityLevel::HIGH);
BENCHMARK(FastRandom_32bit, benchmark::PriorityLevel::HIGH);
BENCHMARK(FastRandom_1bit, benchmark::PriorityLevel::HIGH);
BENCHMARK(MuHash, benchmark::PriorityLevel::HIGH);
BENCHMARK(MuHashMul, benchmark::PriorityLevel::HIGH);
BENCHMARK(MuHashDiv, benchmark::PriorityLevel::HIGH);
BENCHMARK(MuHashPrecompute, benchmark::PriorityLevel::HIGH);
0
bitcoin/src
bitcoin/src/bench/poly1305.cpp
// Copyright (c) 2019-2022 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <bench/bench.h> #include <crypto/poly1305.h> #include <span.h> /* Number of bytes to process per iteration */ static constexpr uint64_t BUFFER_SIZE_TINY = 64; static constexpr uint64_t BUFFER_SIZE_SMALL = 256; static constexpr uint64_t BUFFER_SIZE_LARGE = 1024*1024; static void POLY1305(benchmark::Bench& bench, size_t buffersize) { std::vector<std::byte> tag(Poly1305::TAGLEN, {}); std::vector<std::byte> key(Poly1305::KEYLEN, {}); std::vector<std::byte> in(buffersize, {}); bench.batch(in.size()).unit("byte").run([&] { Poly1305{key}.Update(in).Finalize(tag); }); } static void POLY1305_64BYTES(benchmark::Bench& bench) { POLY1305(bench, BUFFER_SIZE_TINY); } static void POLY1305_256BYTES(benchmark::Bench& bench) { POLY1305(bench, BUFFER_SIZE_SMALL); } static void POLY1305_1MB(benchmark::Bench& bench) { POLY1305(bench, BUFFER_SIZE_LARGE); } BENCHMARK(POLY1305_64BYTES, benchmark::PriorityLevel::HIGH); BENCHMARK(POLY1305_256BYTES, benchmark::PriorityLevel::HIGH); BENCHMARK(POLY1305_1MB, benchmark::PriorityLevel::HIGH);
0