// ==== File: ceph-main/src/os/kstore/KStore.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_KSTORE_H
#define CEPH_OSD_KSTORE_H
#include "acconfig.h"
#include <unistd.h>
#include <atomic>
#include <mutex>
#include <condition_variable>
#include "include/ceph_assert.h"
#include "include/unordered_map.h"
#include "common/Finisher.h"
#include "common/Throttle.h"
#include "common/WorkQueue.h"
#include "os/ObjectStore.h"
#include "common/perf_counters.h"
#include "os/fs/FS.h"
#include "kv/KeyValueDB.h"
#include "kstore_types.h"
#include "boost/intrusive/list.hpp"
enum {
l_kstore_first = 832430,
l_kstore_state_prepare_lat,
l_kstore_state_kv_queued_lat,
l_kstore_state_kv_done_lat,
l_kstore_state_finishing_lat,
l_kstore_state_done_lat,
l_kstore_last
};
class KStore : public ObjectStore {
// -----------------------------------------------------
// types
public:
struct TransContext;
/// an in-memory object
struct Onode {
CephContext* cct;
std::atomic_int nref; ///< reference count
ghobject_t oid;
std::string key; ///< key under PREFIX_OBJ where we are stored
boost::intrusive::list_member_hook<> lru_item;
kstore_onode_t onode; ///< metadata stored as value in kv store
bool dirty; // ???
bool exists;
std::mutex flush_lock; ///< protect flush_txns
std::condition_variable flush_cond; ///< wait here for unapplied txns
std::set<TransContext*> flush_txns; ///< committing txns
uint64_t tail_offset;
ceph::buffer::list tail_bl;
std::map<uint64_t,ceph::buffer::list> pending_stripes; ///< unwritten stripes
Onode(CephContext* cct, const ghobject_t& o, const std::string& k)
: cct(cct),
nref(0),
oid(o),
key(k),
dirty(false),
exists(false),
tail_offset(0) {
}
void flush();
void get() {
++nref;
}
void put() {
if (--nref == 0)
delete this;
}
void clear_tail() {
tail_offset = 0;
tail_bl.clear();
}
void clear_pending_stripes() {
pending_stripes.clear();
}
};
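/*
 * A minimal sketch of the flush protocol implied by flush_lock, flush_cond
 * and flush_txns (an assumption about the shape of Onode::flush(), which is
 * defined in KStore.cc, not here): committing TransContexts register
 * themselves in flush_txns, and flush() blocks until the set drains.
 *
 *   void KStore::Onode::flush() {
 *     std::unique_lock<std::mutex> l(flush_lock);
 *     while (!flush_txns.empty())
 *       flush_cond.wait(l);
 *   }
 */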
typedef boost::intrusive_ptr<Onode> OnodeRef;
struct OnodeHashLRU {
CephContext* cct;
typedef boost::intrusive::list<
Onode,
boost::intrusive::member_hook<
Onode,
boost::intrusive::list_member_hook<>,
&Onode::lru_item> > lru_list_t;
std::mutex lock;
ceph::unordered_map<ghobject_t,OnodeRef> onode_map; ///< forward lookups
lru_list_t lru; ///< lru
OnodeHashLRU(CephContext* cct) : cct(cct) {}
void add(const ghobject_t& oid, OnodeRef o);
void _touch(OnodeRef o);
OnodeRef lookup(const ghobject_t& o);
void rename(const ghobject_t& old_oid, const ghobject_t& new_oid);
void clear();
bool get_next(const ghobject_t& after, std::pair<ghobject_t,OnodeRef> *next);
int trim(int max=-1);
};
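/*
 * Illustrative sketch of the hash-map + intrusive-LRU idiom this class
 * declares (the real lookup()/_touch() live in KStore.cc; these bodies are
 * assumptions shown only to clarify the data structure):
 *
 *   OnodeRef OnodeHashLRU::lookup(const ghobject_t& oid) {
 *     std::lock_guard<std::mutex> l(lock);
 *     auto p = onode_map.find(oid);
 *     if (p == onode_map.end())
 *       return OnodeRef();
 *     _touch(p->second);              // mark as most recently used
 *     return p->second;
 *   }
 *
 *   void OnodeHashLRU::_touch(OnodeRef o) {
 *     lru_list_t::iterator p = lru.iterator_to(*o);
 *     lru.erase(p);                   // unlink from current position
 *     lru.push_front(*o);             // relink at the MRU end
 *   }
 */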
class OpSequencer;
typedef boost::intrusive_ptr<OpSequencer> OpSequencerRef;
struct Collection : public CollectionImpl {
KStore *store;
kstore_cnode_t cnode;
ceph::shared_mutex lock =
ceph::make_shared_mutex("KStore::Collection::lock", true, false);
OpSequencerRef osr;
// cache onodes on a per-collection basis to avoid lock
// contention.
OnodeHashLRU onode_map;
OnodeRef get_onode(const ghobject_t& oid, bool create);
bool contains(const ghobject_t& oid) {
if (cid.is_meta())
return oid.hobj.pool == -1;
spg_t spgid;
if (cid.is_pg(&spgid))
return
spgid.pgid.contains(cnode.bits, oid) &&
oid.shard_id == spgid.shard;
return false;
}
void flush() override;
bool flush_commit(Context *c) override;
private:
FRIEND_MAKE_REF(Collection);
Collection(KStore *ns, coll_t c);
};
using CollectionRef = ceph::ref_t<Collection>;
class OmapIteratorImpl : public ObjectMap::ObjectMapIteratorImpl {
CollectionRef c;
OnodeRef o;
KeyValueDB::Iterator it;
std::string head, tail;
public:
OmapIteratorImpl(CollectionRef c, OnodeRef o, KeyValueDB::Iterator it);
int seek_to_first() override;
int upper_bound(const std::string &after) override;
int lower_bound(const std::string &to) override;
bool valid() override;
int next() override;
std::string key() override;
ceph::buffer::list value() override;
int status() override {
return 0;
}
};
struct TransContext {
typedef enum {
STATE_PREPARE,
STATE_AIO_WAIT,
STATE_IO_DONE,
STATE_KV_QUEUED,
STATE_KV_COMMITTING,
STATE_KV_DONE,
STATE_FINISHING,
STATE_DONE,
} state_t;
state_t state;
const char *get_state_name() {
switch (state) {
case STATE_PREPARE: return "prepare";
case STATE_AIO_WAIT: return "aio_wait";
case STATE_IO_DONE: return "io_done";
case STATE_KV_QUEUED: return "kv_queued";
case STATE_KV_COMMITTING: return "kv_committing";
case STATE_KV_DONE: return "kv_done";
case STATE_FINISHING: return "finishing";
case STATE_DONE: return "done";
}
return "???";
}
void log_state_latency(PerfCounters *logger, int state) {
utime_t lat, now = ceph_clock_now();
lat = now - start;
logger->tinc(state, lat);
start = now;
}
CollectionRef ch;
OpSequencerRef osr;
boost::intrusive::list_member_hook<> sequencer_item;
uint64_t ops, bytes;
std::set<OnodeRef> onodes; ///< these onodes need to be updated/written
KeyValueDB::Transaction t; ///< then we will commit this
Context *oncommit; ///< signal on commit
Context *onreadable; ///< signal on readable
Context *onreadable_sync; ///< signal on readable
std::list<Context*> oncommits; ///< more commit completions
std::list<CollectionRef> removed_collections; ///< colls we removed
CollectionRef first_collection; ///< first referenced collection
utime_t start;
explicit TransContext(OpSequencer *o)
: state(STATE_PREPARE),
osr(o),
ops(0),
bytes(0),
oncommit(NULL),
onreadable(NULL),
onreadable_sync(NULL),
start(ceph_clock_now()){
//cout << "txc new " << this << std::endl;
}
~TransContext() {
//cout << "txc del " << this << std::endl;
}
void write_onode(OnodeRef &o) {
onodes.insert(o);
}
};
class OpSequencer : public RefCountedObject {
public:
std::mutex qlock;
std::condition_variable qcond;
typedef boost::intrusive::list<
TransContext,
boost::intrusive::member_hook<
TransContext,
boost::intrusive::list_member_hook<>,
&TransContext::sequencer_item> > q_list_t;
q_list_t q; ///< transactions
~OpSequencer() {
ceph_assert(q.empty());
}
void queue_new(TransContext *txc) {
std::lock_guard<std::mutex> l(qlock);
q.push_back(*txc);
}
void flush() {
std::unique_lock<std::mutex> l(qlock);
while (!q.empty())
qcond.wait(l);
}
bool flush_commit(Context *c) {
std::lock_guard<std::mutex> l(qlock);
if (q.empty()) {
return true;
}
TransContext *txc = &q.back();
if (txc->state >= TransContext::STATE_KV_DONE) {
return true;
}
ceph_assert(txc->state < TransContext::STATE_KV_DONE);
txc->oncommits.push_back(c);
return false;
}
};
struct KVSyncThread : public Thread {
KStore *store;
explicit KVSyncThread(KStore *s) : store(s) {}
void *entry() override {
store->_kv_sync_thread();
return NULL;
}
};
// --------------------------------------------------------
// members
private:
KeyValueDB *db;
uuid_d fsid;
std::string basedir;
int path_fd; ///< open handle to $path
int fsid_fd; ///< open handle (locked) to $path/fsid
bool mounted;
/// rwlock to protect coll_map
ceph::shared_mutex coll_lock = ceph::make_shared_mutex("KStore::coll_lock");
ceph::unordered_map<coll_t, CollectionRef> coll_map;
std::map<coll_t,CollectionRef> new_coll_map;
std::mutex nid_lock;
uint64_t nid_last;
uint64_t nid_max;
Throttle throttle_ops, throttle_bytes; ///< submit to commit
Finisher finisher;
KVSyncThread kv_sync_thread;
std::mutex kv_lock;
std::condition_variable kv_cond, kv_sync_cond;
bool kv_stop;
std::deque<TransContext*> kv_queue, kv_committing;
//Logger *logger;
PerfCounters *logger;
std::mutex reap_lock;
std::list<CollectionRef> removed_collections;
// --------------------------------------------------------
// private methods
void _init_logger();
void _shutdown_logger();
int _open_path();
void _close_path();
int _open_fsid(bool create);
int _lock_fsid();
int _read_fsid(uuid_d *f);
int _write_fsid();
void _close_fsid();
int _open_db(bool create);
void _close_db();
int _open_collections(int *errors=0);
void _close_collections();
int _open_super_meta();
CollectionRef _get_collection(coll_t cid);
void _queue_reap_collection(CollectionRef& c);
void _reap_collections();
void _assign_nid(TransContext *txc, OnodeRef o);
void _dump_onode(OnodeRef o);
TransContext *_txc_create(OpSequencer *osr);
void _txc_release(TransContext *txc, uint64_t offset, uint64_t length);
void _txc_add_transaction(TransContext *txc, Transaction *t);
void _txc_finalize(OpSequencer *osr, TransContext *txc);
void _txc_state_proc(TransContext *txc);
void _txc_finish_kv(TransContext *txc);
void _txc_finish(TransContext *txc);
void _osr_reap_done(OpSequencer *osr);
void _kv_sync_thread();
void _kv_stop() {
{
std::lock_guard<std::mutex> l(kv_lock);
kv_stop = true;
kv_cond.notify_all();
}
kv_sync_thread.join();
kv_stop = false;
}
void _do_read_stripe(OnodeRef o, uint64_t offset, ceph::buffer::list *pbl, bool do_cache);
void _do_write_stripe(TransContext *txc, OnodeRef o,
uint64_t offset, ceph::buffer::list& bl);
void _do_remove_stripe(TransContext *txc, OnodeRef o, uint64_t offset);
int _collection_list(
Collection *c, const ghobject_t& start, const ghobject_t& end,
int max, std::vector<ghobject_t> *ls, ghobject_t *next);
public:
KStore(CephContext *cct, const std::string& path);
~KStore() override;
std::string get_type() override {
return "kstore";
}
bool needs_journal() override { return false; };
bool wants_journal() override { return false; };
bool allows_journal() override { return false; };
static int get_block_device_fsid(const std::string& path, uuid_d *fsid);
bool test_mount_in_use() override;
int mount() override;
int umount() override;
void _sync();
int fsck(bool deep) override;
int validate_hobject_key(const hobject_t &obj) const override {
return 0;
}
unsigned get_max_attr_name_length() override {
return 256; // arbitrary; there is no real limit internally
}
int mkfs() override;
int mkjournal() override {
return 0;
}
void dump_perf_counters(ceph::Formatter *f) override {
f->open_object_section("perf_counters");
logger->dump_formatted(f, false, false);
f->close_section();
}
void get_db_statistics(ceph::Formatter *f) override {
db->get_statistics(f);
}
int statfs(struct store_statfs_t *buf,
osd_alert_list_t* alerts = nullptr) override;
int pool_statfs(uint64_t pool_id, struct store_statfs_t *buf,
bool *per_pool_omap) override;
CollectionHandle open_collection(const coll_t& c) override;
CollectionHandle create_new_collection(const coll_t& c) override;
void set_collection_commit_queue(const coll_t& cid,
ContextQueue *commit_queue) override {
}
using ObjectStore::exists;
bool exists(CollectionHandle& c, const ghobject_t& oid) override;
using ObjectStore::stat;
int stat(
CollectionHandle& c,
const ghobject_t& oid,
struct stat *st,
bool allow_eio = false) override; // struct stat?
int set_collection_opts(
CollectionHandle& c,
const pool_opts_t& opts) override;
using ObjectStore::read;
int read(
CollectionHandle& c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
ceph::buffer::list& bl,
uint32_t op_flags = 0) override;
int _do_read(
OnodeRef o,
uint64_t offset,
size_t len,
ceph::buffer::list& bl,
bool do_cache,
uint32_t op_flags = 0);
using ObjectStore::fiemap;
int fiemap(CollectionHandle& c, const ghobject_t& oid, uint64_t offset, size_t len, std::map<uint64_t, uint64_t>& destmap) override;
int fiemap(CollectionHandle& c, const ghobject_t& oid, uint64_t offset, size_t len, ceph::buffer::list& outbl) override;
using ObjectStore::getattr;
int getattr(CollectionHandle& c, const ghobject_t& oid, const char *name, ceph::buffer::ptr& value) override;
using ObjectStore::getattrs;
int getattrs(CollectionHandle& c,
const ghobject_t& oid,
std::map<std::string,ceph::buffer::ptr,std::less<>>& aset) override;
int list_collections(std::vector<coll_t>& ls) override;
bool collection_exists(const coll_t& c) override;
int collection_empty(CollectionHandle& c, bool *empty) override;
int collection_bits(CollectionHandle& c) override;
int collection_list(
CollectionHandle &c, const ghobject_t& start, const ghobject_t& end,
int max,
std::vector<ghobject_t> *ls, ghobject_t *next) override;
using ObjectStore::omap_get;
int omap_get(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
std::map<std::string, ceph::buffer::list> *out ///< [out] Key to value map
) override;
using ObjectStore::omap_get_header;
/// Get omap header
int omap_get_header(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
bool allow_eio = false ///< [in] don't assert on eio
) override;
using ObjectStore::omap_get_keys;
/// Get keys defined on oid
int omap_get_keys(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
std::set<std::string> *keys ///< [out] Keys defined on oid
) override;
using ObjectStore::omap_get_values;
/// Get key values
int omap_get_values(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to get
std::map<std::string, ceph::buffer::list> *out ///< [out] Returned keys and values
) override;
using ObjectStore::omap_check_keys;
/// Filters keys into out which are defined on oid
int omap_check_keys(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to check
std::set<std::string> *out ///< [out] Subset of keys defined on oid
) override;
using ObjectStore::get_omap_iterator;
ObjectMap::ObjectMapIterator get_omap_iterator(
CollectionHandle& c, ///< [in] collection
const ghobject_t &oid ///< [in] object
) override;
void set_fsid(uuid_d u) override {
fsid = u;
}
uuid_d get_fsid() override {
return fsid;
}
uint64_t estimate_objects_overhead(uint64_t num_objects) override {
return num_objects * 300; //assuming per-object overhead is 300 bytes
}
objectstore_perf_stat_t get_cur_stats() override {
return objectstore_perf_stat_t();
}
const PerfCounters* get_perf_counters() const override {
return logger;
}
int queue_transactions(
CollectionHandle& ch,
std::vector<Transaction>& tls,
TrackedOpRef op = TrackedOpRef(),
ThreadPool::TPHandle *handle = NULL) override;
void compact () override {
ceph_assert(db);
db->compact();
}
private:
// --------------------------------------------------------
// write ops
int _write(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
uint64_t offset, size_t len,
ceph::buffer::list& bl,
uint32_t fadvise_flags);
int _do_write(TransContext *txc,
OnodeRef o,
uint64_t offset, uint64_t length,
ceph::buffer::list& bl,
uint32_t fadvise_flags);
int _touch(TransContext *txc,
CollectionRef& c,
OnodeRef& o);
int _zero(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
uint64_t offset, size_t len);
int _do_truncate(TransContext *txc,
OnodeRef o,
uint64_t offset);
int _truncate(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
uint64_t offset);
int _remove(TransContext *txc,
CollectionRef& c,
OnodeRef& o);
int _do_remove(TransContext *txc,
OnodeRef o);
int _setattr(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
const std::string& name,
ceph::buffer::ptr& val);
int _setattrs(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
const std::map<std::string,ceph::buffer::ptr>& aset);
int _rmattr(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
const std::string& name);
int _rmattrs(TransContext *txc,
CollectionRef& c,
OnodeRef& o);
void _do_omap_clear(TransContext *txc, uint64_t id);
int _omap_clear(TransContext *txc,
CollectionRef& c,
OnodeRef& o);
int _omap_setkeys(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
ceph::buffer::list& bl);
int _omap_setheader(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
ceph::buffer::list& header);
int _omap_rmkeys(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
const ceph::buffer::list& bl);
int _omap_rmkey_range(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
const std::string& first, const std::string& last);
int _setallochint(TransContext *txc,
CollectionRef& c,
OnodeRef& o,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags);
int _clone(TransContext *txc,
CollectionRef& c,
OnodeRef& oldo,
OnodeRef& newo);
int _clone_range(TransContext *txc,
CollectionRef& c,
OnodeRef& oldo,
OnodeRef& newo,
uint64_t srcoff, uint64_t length, uint64_t dstoff);
int _rename(TransContext *txc,
CollectionRef& c,
OnodeRef& oldo,
OnodeRef& newo,
const ghobject_t& new_oid);
int _create_collection(TransContext *txc, coll_t cid, unsigned bits,
CollectionRef *c);
int _remove_collection(TransContext *txc, coll_t cid, CollectionRef *c);
int _split_collection(TransContext *txc,
CollectionRef& c,
CollectionRef& d,
unsigned bits, int rem);
int _merge_collection(TransContext *txc,
CollectionRef *c,
CollectionRef& d,
unsigned bits);
};
static inline void intrusive_ptr_add_ref(KStore::Onode *o) {
o->get();
}
static inline void intrusive_ptr_release(KStore::Onode *o) {
o->put();
}
static inline void intrusive_ptr_add_ref(KStore::OpSequencer *o) {
o->get();
}
static inline void intrusive_ptr_release(KStore::OpSequencer *o) {
o->put();
}
#endif
// ==== File: ceph-main/src/os/kstore/kstore_types.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_KSTORE_TYPES_H
#define CEPH_OSD_KSTORE_TYPES_H
#include <ostream>
#include "include/types.h"
#include "include/interval_set.h"
#include "include/utime.h"
#include "common/hobject.h"
namespace ceph {
class Formatter;
}
/// collection metadata
struct kstore_cnode_t {
uint32_t bits; ///< how many bits of coll pgid are significant
explicit kstore_cnode_t(int b=0) : bits(b) {}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<kstore_cnode_t*>& o);
};
WRITE_CLASS_ENCODER(kstore_cnode_t)
/// onode: per-object metadata
struct kstore_onode_t {
uint64_t nid; ///< numeric id (locally unique)
uint64_t size; ///< object size
std::map<std::string, ceph::buffer::ptr, std::less<>> attrs; ///< attrs
uint64_t omap_head; ///< id for omap root node
uint32_t stripe_size; ///< stripe size
uint32_t expected_object_size;
uint32_t expected_write_size;
uint32_t alloc_hint_flags;
kstore_onode_t()
: nid(0),
size(0),
omap_head(0),
stripe_size(0),
expected_object_size(0),
expected_write_size(0),
alloc_hint_flags(0) {}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<kstore_onode_t*>& o);
};
WRITE_CLASS_ENCODER(kstore_onode_t)
#endif
// ==== File: ceph-main/src/os/memstore/MemStore.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013- Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MEMSTORE_H
#define CEPH_MEMSTORE_H
#include <atomic>
#include <mutex>
#include <boost/intrusive_ptr.hpp>
#include "include/unordered_map.h"
#include "common/Finisher.h"
#include "common/RefCountedObj.h"
#include "os/ObjectStore.h"
#include "PageSet.h"
#include "include/ceph_assert.h"
class MemStore : public ObjectStore {
public:
struct Object : public RefCountedObject {
ceph::mutex xattr_mutex{ceph::make_mutex("MemStore::Object::xattr_mutex")};
ceph::mutex omap_mutex{ceph::make_mutex("MemStore::Object::omap_mutex")};
std::map<std::string,ceph::buffer::ptr,std::less<>> xattr;
ceph::buffer::list omap_header;
std::map<std::string,ceph::buffer::list> omap;
using Ref = ceph::ref_t<Object>;
// interface for object data
virtual size_t get_size() const = 0;
virtual int read(uint64_t offset, uint64_t len, ceph::buffer::list &bl) = 0;
virtual int write(uint64_t offset, const ceph::buffer::list &bl) = 0;
virtual int clone(Object *src, uint64_t srcoff, uint64_t len,
uint64_t dstoff) = 0;
virtual int truncate(uint64_t offset) = 0;
virtual void encode(ceph::buffer::list& bl) const = 0;
virtual void decode(ceph::buffer::list::const_iterator& p) = 0;
void encode_base(ceph::buffer::list& bl) const {
using ceph::encode;
encode(xattr, bl);
encode(omap_header, bl);
encode(omap, bl);
}
void decode_base(ceph::buffer::list::const_iterator& p) {
using ceph::decode;
decode(xattr, p);
decode(omap_header, p);
decode(omap, p);
}
void dump(ceph::Formatter *f) const {
f->dump_int("data_len", get_size());
f->dump_int("omap_header_len", omap_header.length());
f->open_array_section("xattrs");
for (auto p = xattr.begin(); p != xattr.end(); ++p) {
f->open_object_section("xattr");
f->dump_string("name", p->first);
f->dump_int("length", p->second.length());
f->close_section();
}
f->close_section();
f->open_array_section("omap");
for (auto p = omap.begin(); p != omap.end(); ++p) {
f->open_object_section("pair");
f->dump_string("key", p->first);
f->dump_int("length", p->second.length());
f->close_section();
}
f->close_section();
}
protected:
Object() = default;
};
using ObjectRef = Object::Ref;
struct PageSetObject;
struct Collection : public CollectionImpl {
int bits = 0;
CephContext *cct;
bool use_page_set;
ceph::unordered_map<ghobject_t, ObjectRef> object_hash; ///< for lookup
std::map<ghobject_t, ObjectRef> object_map; ///< for iteration
std::map<std::string,ceph::buffer::ptr> xattr;
/// for object_{map,hash}
ceph::shared_mutex lock{
ceph::make_shared_mutex("MemStore::Collection::lock", true, false)};
bool exists = true;
ceph::mutex sequencer_mutex{
ceph::make_mutex("MemStore::Collection::sequencer_mutex")};
typedef boost::intrusive_ptr<Collection> Ref;
ObjectRef create_object() const;
// NOTE: The lock only needs to protect the object_map/hash, not the
// contents of individual objects. The osd is already sequencing
// reads and writes, so we will never see them concurrently at this
// level.
ObjectRef get_object(ghobject_t oid) {
std::shared_lock l{lock};
auto o = object_hash.find(oid);
if (o == object_hash.end())
return ObjectRef();
return o->second;
}
ObjectRef get_or_create_object(ghobject_t oid) {
std::lock_guard l{lock};
auto result = object_hash.emplace(oid, ObjectRef());
if (result.second)
object_map[oid] = result.first->second = create_object();
return result.first->second;
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(xattr, bl);
encode(use_page_set, bl);
uint32_t s = object_map.size();
encode(s, bl);
for (auto p = object_map.begin(); p != object_map.end(); ++p) {
encode(p->first, bl);
p->second->encode(bl);
}
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& p) {
DECODE_START(1, p);
decode(xattr, p);
decode(use_page_set, p);
uint32_t s;
decode(s, p);
while (s--) {
ghobject_t k;
decode(k, p);
auto o = create_object();
o->decode(p);
object_map.insert(std::make_pair(k, o));
object_hash.insert(std::make_pair(k, o));
}
DECODE_FINISH(p);
}
uint64_t used_bytes() const {
uint64_t result = 0;
for (auto p = object_map.begin(); p != object_map.end(); ++p) {
result += p->second->get_size();
}
return result;
}
void flush() override {
}
bool flush_commit(Context *c) override {
return true;
}
private:
FRIEND_MAKE_REF(Collection);
explicit Collection(CephContext *cct, coll_t c)
: CollectionImpl(cct, c),
cct(cct),
use_page_set(cct->_conf->memstore_page_set) {}
};
typedef Collection::Ref CollectionRef;
private:
class OmapIteratorImpl;
ceph::unordered_map<coll_t, CollectionRef> coll_map;
/// rwlock to protect coll_map
ceph::shared_mutex coll_lock{
ceph::make_shared_mutex("MemStore::coll_lock")};
std::map<coll_t,CollectionRef> new_coll_map;
CollectionRef get_collection(const coll_t& cid);
Finisher finisher;
std::atomic<uint64_t> used_bytes;
void _do_transaction(Transaction& t);
int _touch(const coll_t& cid, const ghobject_t& oid);
int _write(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len,
const ceph::buffer::list& bl, uint32_t fadvise_flags = 0);
int _zero(const coll_t& cid, const ghobject_t& oid, uint64_t offset, size_t len);
int _truncate(const coll_t& cid, const ghobject_t& oid, uint64_t size);
int _remove(const coll_t& cid, const ghobject_t& oid);
int _setattrs(const coll_t& cid, const ghobject_t& oid, std::map<std::string,ceph::buffer::ptr>& aset);
int _rmattr(const coll_t& cid, const ghobject_t& oid, const char *name);
int _rmattrs(const coll_t& cid, const ghobject_t& oid);
int _clone(const coll_t& cid, const ghobject_t& oldoid, const ghobject_t& newoid);
int _clone_range(const coll_t& cid, const ghobject_t& oldoid,
const ghobject_t& newoid,
uint64_t srcoff, uint64_t len, uint64_t dstoff);
int _omap_clear(const coll_t& cid, const ghobject_t &oid);
int _omap_setkeys(const coll_t& cid, const ghobject_t &oid, ceph::buffer::list& aset_bl);
int _omap_rmkeys(const coll_t& cid, const ghobject_t &oid, ceph::buffer::list& keys_bl);
int _omap_rmkeyrange(const coll_t& cid, const ghobject_t &oid,
const std::string& first, const std::string& last);
int _omap_setheader(const coll_t& cid, const ghobject_t &oid, const ceph::buffer::list &bl);
int _collection_hint_expected_num_objs(const coll_t& cid, uint32_t pg_num,
uint64_t num_objs) const { return 0; }
int _create_collection(const coll_t& c, int bits);
int _destroy_collection(const coll_t& c);
int _collection_add(const coll_t& cid, const coll_t& ocid, const ghobject_t& oid);
int _collection_move_rename(const coll_t& oldcid, const ghobject_t& oldoid,
coll_t cid, const ghobject_t& o);
int _split_collection(const coll_t& cid, uint32_t bits, uint32_t rem, coll_t dest);
int _merge_collection(const coll_t& cid, uint32_t bits, coll_t dest);
int _save();
int _load();
void dump(ceph::Formatter *f);
void dump_all();
public:
MemStore(CephContext *cct, const std::string& path)
: ObjectStore(cct, path),
finisher(cct),
used_bytes(0) {}
~MemStore() override { }
std::string get_type() override {
return "memstore";
}
bool test_mount_in_use() override {
return false;
}
int mount() override;
int umount() override;
int fsck(bool deep) override {
return 0;
}
int validate_hobject_key(const hobject_t &obj) const override {
return 0;
}
unsigned get_max_attr_name_length() override {
return 256; // arbitrary; there is no real limit internally
}
int mkfs() override;
int mkjournal() override {
return 0;
}
bool wants_journal() override {
return false;
}
bool allows_journal() override {
return false;
}
bool needs_journal() override {
return false;
}
int get_devices(std::set<std::string> *ls) override {
// no devices for us!
return 0;
}
int statfs(struct store_statfs_t *buf,
osd_alert_list_t* alerts = nullptr) override;
int pool_statfs(uint64_t pool_id, struct store_statfs_t *buf,
bool *per_pool_omap) override;
bool exists(CollectionHandle &c, const ghobject_t& oid) override;
int stat(CollectionHandle &c, const ghobject_t& oid,
struct stat *st, bool allow_eio = false) override;
int set_collection_opts(
CollectionHandle& c,
const pool_opts_t& opts) override;
int read(
CollectionHandle &c,
const ghobject_t& oid,
uint64_t offset,
size_t len,
ceph::buffer::list& bl,
uint32_t op_flags = 0) override;
using ObjectStore::fiemap;
int fiemap(CollectionHandle& c, const ghobject_t& oid,
uint64_t offset, size_t len, ceph::buffer::list& bl) override;
int fiemap(CollectionHandle& c, const ghobject_t& oid, uint64_t offset,
size_t len, std::map<uint64_t, uint64_t>& destmap) override;
int getattr(CollectionHandle &c, const ghobject_t& oid, const char *name,
ceph::buffer::ptr& value) override;
int getattrs(CollectionHandle &c, const ghobject_t& oid,
std::map<std::string,ceph::buffer::ptr,std::less<>>& aset) override;
int list_collections(std::vector<coll_t>& ls) override;
CollectionHandle open_collection(const coll_t& c) override {
return get_collection(c);
}
CollectionHandle create_new_collection(const coll_t& c) override;
void set_collection_commit_queue(const coll_t& cid,
ContextQueue *commit_queue) override {
}
bool collection_exists(const coll_t& c) override;
int collection_empty(CollectionHandle& c, bool *empty) override;
int collection_bits(CollectionHandle& c) override;
int collection_list(CollectionHandle& cid,
const ghobject_t& start, const ghobject_t& end, int max,
std::vector<ghobject_t> *ls, ghobject_t *next) override;
using ObjectStore::omap_get;
int omap_get(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
std::map<std::string, ceph::buffer::list> *out ///< [out] Key to value map
) override;
using ObjectStore::omap_get_header;
/// Get omap header
int omap_get_header(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
bool allow_eio = false ///< [in] don't assert on eio
) override;
using ObjectStore::omap_get_keys;
/// Get keys defined on oid
int omap_get_keys(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
std::set<std::string> *keys ///< [out] Keys defined on oid
) override;
using ObjectStore::omap_get_values;
/// Get key values
int omap_get_values(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to get
std::map<std::string, ceph::buffer::list> *out ///< [out] Returned keys and values
) override;
#ifdef WITH_SEASTAR
int omap_get_values(
CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::optional<std::string> &start_after, ///< [in] Keys to get
std::map<std::string, ceph::buffer::list> *out ///< [out] Returned keys and values
) override;
#endif
using ObjectStore::omap_check_keys;
/// Filters keys into out which are defined on oid
int omap_check_keys(
CollectionHandle& c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to check
std::set<std::string> *out ///< [out] Subset of keys defined on oid
) override;
using ObjectStore::get_omap_iterator;
ObjectMap::ObjectMapIterator get_omap_iterator(
CollectionHandle& c, ///< [in] collection
const ghobject_t &oid ///< [in] object
) override;
void set_fsid(uuid_d u) override;
uuid_d get_fsid() override;
uint64_t estimate_objects_overhead(uint64_t num_objects) override {
return 0; //do not care
}
objectstore_perf_stat_t get_cur_stats() override;
const PerfCounters* get_perf_counters() const override {
return nullptr;
}
int queue_transactions(
CollectionHandle& ch,
std::vector<Transaction>& tls,
TrackedOpRef op = TrackedOpRef(),
ThreadPool::TPHandle *handle = NULL) override;
};
#endif
// ==== File: ceph-main/src/os/memstore/PageSet.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013- Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_PAGESET_H
#define CEPH_PAGESET_H
#include <algorithm>
#include <atomic>
#include <cassert>
#include <mutex>
#include <vector>
#include <boost/intrusive/avl_set.hpp>
#include <boost/intrusive_ptr.hpp>
#include "include/encoding.h"
struct Page {
char *const data;
boost::intrusive::avl_set_member_hook<> hook;
uint64_t offset;
// avoid RefCountedObject because it has a virtual destructor
std::atomic<uint16_t> nrefs;
void get() { ++nrefs; }
void put() { if (--nrefs == 0) delete this; }
typedef boost::intrusive_ptr<Page> Ref;
friend void intrusive_ptr_add_ref(Page *p) { p->get(); }
friend void intrusive_ptr_release(Page *p) { p->put(); }
// key-value comparison functor for avl
struct Less {
bool operator()(uint64_t offset, const Page &page) const {
return offset < page.offset;
}
bool operator()(const Page &page, uint64_t offset) const {
return page.offset < offset;
}
bool operator()(const Page &lhs, const Page &rhs) const {
return lhs.offset < rhs.offset;
}
};
void encode(ceph::buffer::list &bl, size_t page_size) const {
using ceph::encode;
bl.append(ceph::buffer::copy(data, page_size));
encode(offset, bl);
}
void decode(ceph::buffer::list::const_iterator &p, size_t page_size) {
using ceph::decode;
p.copy(page_size, data);
decode(offset, p);
}
static Ref create(size_t page_size, uint64_t offset = 0) {
// ensure proper alignment of the Page
const auto align = alignof(Page);
page_size = (page_size + align - 1) & ~(align - 1);
// allocate the Page and its data in a single buffer
auto buffer = new char[page_size + sizeof(Page)];
// place the Page structure at the end of the buffer
return new (buffer + page_size) Page(buffer, offset);
}
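  // Resulting layout of the single allocation made by create():
  //
  //   buffer -> [ data: page_size bytes ][ Page object ]
  //
  // data points at the start of that allocation, so operator delete()
  // below releases the whole buffer via `delete[] data`.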
// copy disabled
Page(const Page&) = delete;
const Page& operator=(const Page&) = delete;
private: // private constructor, use create() instead
Page(char *data, uint64_t offset) : data(data), offset(offset), nrefs(1) {}
static void operator delete(void *p) {
delete[] reinterpret_cast<Page*>(p)->data;
}
};
class PageSet {
public:
// alloc_range() and get_range() return page refs in a vector
typedef std::vector<Page::Ref> page_vector;
private:
// store pages in a boost intrusive avl_set
typedef Page::Less page_cmp;
typedef boost::intrusive::member_hook<Page,
boost::intrusive::avl_set_member_hook<>,
&Page::hook> member_option;
typedef boost::intrusive::avl_set<Page,
boost::intrusive::compare<page_cmp>, member_option> page_set;
typedef typename page_set::iterator iterator;
page_set pages;
uint64_t page_size;
typedef std::mutex lock_type;
lock_type mutex;
void free_pages(iterator cur, iterator end) {
while (cur != end) {
Page *page = &*cur;
cur = pages.erase(cur);
page->put();
}
}
int count_pages(uint64_t offset, uint64_t len) const {
// count the overlapping pages
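    // e.g. with page_size=4096, offset=1000, len=5000: the unaligned head
    // costs one page and consumes rem=3096 bytes (len drops to 1904), the
    // whole-page term adds 0, and the 1904-byte tail adds one more page,
    // giving 2 pages for the byte range [1000, 6000).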
int count = 0;
if (offset % page_size) {
count++;
size_t rem = page_size - offset % page_size;
len = len <= rem ? 0 : len - rem;
}
count += len / page_size;
if (len % page_size)
count++;
return count;
}
public:
explicit PageSet(size_t page_size) : page_size(page_size) {}
PageSet(PageSet &&rhs)
: pages(std::move(rhs.pages)), page_size(rhs.page_size) {}
~PageSet() {
free_pages(pages.begin(), pages.end());
}
// disable copy
PageSet(const PageSet&) = delete;
const PageSet& operator=(const PageSet&) = delete;
bool empty() const { return pages.empty(); }
size_t size() const { return pages.size(); }
size_t get_page_size() const { return page_size; }
// allocate all pages that intersect the range [offset,length)
void alloc_range(uint64_t offset, uint64_t length, page_vector &range) {
// loop in reverse so we can provide hints to avl_set::insert_check()
// and get O(1) insertions after the first
uint64_t position = offset + length - 1;
range.resize(count_pages(offset, length));
auto out = range.rbegin();
std::lock_guard<lock_type> lock(mutex);
iterator cur = pages.end();
while (length) {
const uint64_t page_offset = position & ~(page_size-1);
typename page_set::insert_commit_data commit;
auto insert = pages.insert_check(cur, page_offset, page_cmp(), commit);
if (insert.second) {
auto page = Page::create(page_size, page_offset);
cur = pages.insert_commit(*page, commit);
// assume that the caller will write to the range [offset,length),
// so we only need to zero memory outside of this range
// zero end of page past offset + length
if (offset + length < page->offset + page_size)
std::fill(page->data + offset + length - page->offset,
page->data + page_size, 0);
// zero front of page between page_offset and offset
if (offset > page->offset)
std::fill(page->data, page->data + offset - page->offset, 0);
} else { // exists
cur = insert.first;
}
// add a reference to output vector
out->reset(&*cur);
++out;
auto c = std::min(length, (position & (page_size-1)) + 1);
position -= c;
length -= c;
}
// make sure we sized the vector correctly
ceph_assert(out == range.rend());
}
// return all allocated pages that intersect the range [offset,length)
void get_range(uint64_t offset, uint64_t length, page_vector &range) {
auto cur = pages.lower_bound(offset & ~(page_size-1), page_cmp());
while (cur != pages.end() && cur->offset < offset + length)
range.push_back(&*cur++);
}
void free_pages_after(uint64_t offset) {
std::lock_guard<lock_type> lock(mutex);
auto cur = pages.lower_bound(offset & ~(page_size-1), page_cmp());
if (cur == pages.end())
return;
if (cur->offset < offset)
cur++;
free_pages(cur, pages.end());
}
void encode(ceph::buffer::list &bl) const {
using ceph::encode;
encode(page_size, bl);
unsigned count = pages.size();
encode(count, bl);
for (auto p = pages.rbegin(); p != pages.rend(); ++p)
p->encode(bl, page_size);
}
void decode(ceph::buffer::list::const_iterator &p) {
using ceph::decode;
ceph_assert(empty());
decode(page_size, p);
unsigned count;
decode(count, p);
auto cur = pages.end();
for (unsigned i = 0; i < count; i++) {
auto page = Page::create(page_size);
page->decode(p, page_size);
cur = pages.insert_before(cur, *page);
}
}
};
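/*
 * Illustrative usage sketch (assumptions, not part of this header): a
 * writer covering [offset, offset+len) with the contents of a bufferlist
 * would allocate the overlapping pages and copy into each, clipped to the
 * page boundaries. The PageSetObject data path in MemStore.cc does
 * something along these lines.
 *
 *   PageSet ps(4096);
 *   PageSet::page_vector pages;
 *   ps.alloc_range(offset, bl.length(), pages);   // ascending page order
 *   auto it = bl.cbegin();
 *   uint64_t pos = offset;
 *   for (auto &page : pages) {
 *     uint64_t page_off = pos - page->offset;     // offset within this page
 *     uint64_t count = std::min<uint64_t>(ps.get_page_size() - page_off,
 *                                         offset + bl.length() - pos);
 *     it.copy(count, page->data + page_off);      // sequential copy out of bl
 *     pos += count;
 *   }
 */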
#endif // CEPH_PAGESET_H
// ==== File: ceph-main/src/osd/ClassHandler.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLASSHANDLER_H
#define CEPH_CLASSHANDLER_H
#include <variant>
#include "include/types.h"
#include "include/common_fwd.h"
#include "common/ceph_mutex.h"
#include "objclass/objclass.h"
//forward declaration
class ClassHandler
{
public:
CephContext *cct;
struct ClassData;
struct ClassMethod {
const std::string name;
using func_t = std::variant<cls_method_cxx_call_t, cls_method_call_t>;
func_t func;
int flags = 0;
ClassData *cls = nullptr;
int exec(cls_method_context_t ctx,
ceph::bufferlist& indata,
ceph::bufferlist& outdata);
void unregister();
int get_flags() {
std::lock_guard l(cls->handler->mutex);
return flags;
}
ClassMethod(const char* name, func_t call, int flags, ClassData* cls)
: name{name}, func{call}, flags{flags}, cls{cls}
{}
};
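  /*
   * Sketch of the variant dispatch exec() has to perform (an assumption,
   * not the actual body in ClassHandler.cc): func holds either the C++ or
   * the C-style method pointer, so execution branches roughly like:
   *
   *   if (auto cxx = std::get_if<cls_method_cxx_call_t>(&func)) {
   *     ret = (*cxx)(ctx, &indata, &outdata);
   *   } else {
   *     auto call = std::get<cls_method_call_t>(func);
   *     char *out = nullptr;
   *     int outlen = 0;
   *     ret = call(ctx, indata.c_str(), indata.length(), &out, &outlen);
   *     // ...append out/outlen to outdata and release the buffer...
   *   }
   */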
struct ClassFilter {
ClassData *cls = nullptr;
std::string name;
cls_cxx_filter_factory_t fn = nullptr;
void unregister();
};
struct ClassData {
enum Status {
CLASS_UNKNOWN,
CLASS_MISSING, // missing
CLASS_MISSING_DEPS, // missing dependencies
CLASS_INITIALIZING, // calling init() right now
CLASS_OPEN, // initialized, usable
} status = CLASS_UNKNOWN;
std::string name;
ClassHandler *handler = nullptr;
void *handle = nullptr;
bool allowed = false;
std::map<std::string, ClassMethod> methods_map;
std::map<std::string, ClassFilter> filters_map;
std::set<ClassData *> dependencies; /* our dependencies */
std::set<ClassData *> missing_dependencies; /* only missing dependencies */
ClassMethod *_get_method(const std::string& mname);
ClassMethod *register_method(const char *mname,
int flags,
cls_method_call_t func);
ClassMethod *register_cxx_method(const char *mname,
int flags,
cls_method_cxx_call_t func);
void unregister_method(ClassMethod *method);
ClassFilter *register_cxx_filter(const std::string &filter_name,
cls_cxx_filter_factory_t fn);
void unregister_filter(ClassFilter *method);
ClassMethod *get_method(const std::string& mname) {
std::lock_guard l(handler->mutex);
return _get_method(mname);
}
int get_method_flags(const std::string& mname);
ClassFilter *get_filter(const std::string &filter_name) {
std::lock_guard l(handler->mutex);
if (auto i = filters_map.find(filter_name); i == filters_map.end()) {
return nullptr;
} else {
return &(i->second);
}
}
};
private:
std::map<std::string, ClassData> classes;
ClassData *_get_class(const std::string& cname, bool check_allowed);
int _load_class(ClassData *cls);
static bool in_class_list(const std::string& cname,
const std::string& list);
ceph::mutex mutex = ceph::make_mutex("ClassHandler");
public:
explicit ClassHandler(CephContext *cct) : cct(cct) {}
int open_all_classes();
int open_class(const std::string& cname, ClassData **pcls);
ClassData *register_class(const char *cname);
void unregister_class(ClassData *cls);
void shutdown();
static ClassHandler& get_instance();
};
#endif
// ==== File: ceph-main/src/osd/DynamicPerfStats.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef DYNAMIC_PERF_STATS_H
#define DYNAMIC_PERF_STATS_H
#include "include/random.h"
#include "messages/MOSDOp.h"
#include "mgr/OSDPerfMetricTypes.h"
#include "osd/OSD.h"
#include "osd/OpRequest.h"
class DynamicPerfStats {
public:
DynamicPerfStats() {
}
DynamicPerfStats(const std::list<OSDPerfMetricQuery> &queries) {
for (auto &query : queries) {
data[query];
}
}
void merge(const DynamicPerfStats &dps) {
for (auto &query_it : dps.data) {
auto &query = query_it.first;
for (auto &key_it : query_it.second) {
auto &key = key_it.first;
auto counter_it = key_it.second.begin();
auto update_counter_fnc =
[&counter_it](const PerformanceCounterDescriptor &d,
PerformanceCounter *c) {
c->first += counter_it->first;
c->second += counter_it->second;
counter_it++;
};
ceph_assert(key_it.second.size() >= data[query][key].size());
query.update_counters(update_counter_fnc, &data[query][key]);
}
}
}
void set_queries(const std::list<OSDPerfMetricQuery> &queries) {
std::map<OSDPerfMetricQuery,
std::map<OSDPerfMetricKey, PerformanceCounters>> new_data;
for (auto &query : queries) {
std::swap(new_data[query], data[query]);
}
std::swap(data, new_data);
}
bool is_enabled() {
return !data.empty();
}
void add(const OSDService *osd, const pg_info_t &pg_info, const OpRequest& op,
uint64_t inb, uint64_t outb, const utime_t &latency) {
auto update_counter_fnc =
[&op, inb, outb, &latency](const PerformanceCounterDescriptor &d,
PerformanceCounter *c) {
ceph_assert(d.is_supported());
switch(d.type) {
case PerformanceCounterType::OPS:
c->first++;
return;
case PerformanceCounterType::WRITE_OPS:
if (op.may_write() || op.may_cache()) {
c->first++;
}
return;
case PerformanceCounterType::READ_OPS:
if (op.may_read()) {
c->first++;
}
return;
case PerformanceCounterType::BYTES:
c->first += inb + outb;
return;
case PerformanceCounterType::WRITE_BYTES:
if (op.may_write() || op.may_cache()) {
c->first += inb;
}
return;
case PerformanceCounterType::READ_BYTES:
if (op.may_read()) {
c->first += outb;
}
return;
case PerformanceCounterType::LATENCY:
c->first += latency.to_nsec();
c->second++;
return;
case PerformanceCounterType::WRITE_LATENCY:
if (op.may_write() || op.may_cache()) {
c->first += latency.to_nsec();
c->second++;
}
return;
case PerformanceCounterType::READ_LATENCY:
if (op.may_read()) {
c->first += latency.to_nsec();
c->second++;
}
return;
default:
ceph_abort_msg("unknown counter type");
}
};
auto get_subkey_fnc =
[&osd, &pg_info, &op](const OSDPerfMetricSubKeyDescriptor &d,
OSDPerfMetricSubKey *sub_key) {
ceph_assert(d.is_supported());
auto m = op.get_req<MOSDOp>();
std::string match_string;
switch(d.type) {
case OSDPerfMetricSubKeyType::CLIENT_ID:
match_string = stringify(m->get_reqid().name);
break;
case OSDPerfMetricSubKeyType::CLIENT_ADDRESS:
match_string = stringify(m->get_connection()->get_peer_addr());
break;
case OSDPerfMetricSubKeyType::POOL_ID:
match_string = stringify(m->get_spg().pool());
break;
case OSDPerfMetricSubKeyType::NAMESPACE:
match_string = m->get_hobj().nspace;
break;
case OSDPerfMetricSubKeyType::OSD_ID:
match_string = stringify(osd->get_nodeid());
break;
case OSDPerfMetricSubKeyType::PG_ID:
match_string = stringify(pg_info.pgid);
break;
case OSDPerfMetricSubKeyType::OBJECT_NAME:
match_string = m->get_oid().name;
break;
case OSDPerfMetricSubKeyType::SNAP_ID:
match_string = stringify(m->get_snapid());
break;
default:
ceph_abort_msg("unknown counter type");
}
std::smatch match;
if (!std::regex_search(match_string, match, d.regex)) {
return false;
}
if (match.size() <= 1) {
return false;
}
for (size_t i = 1; i < match.size(); i++) {
sub_key->push_back(match[i].str());
}
return true;
};
for (auto &it : data) {
auto &query = it.first;
OSDPerfMetricKey key;
if (query.get_key(get_subkey_fnc, &key)) {
query.update_counters(update_counter_fnc, &it.second[key]);
}
}
}
void add_to_reports(
const std::map<OSDPerfMetricQuery, OSDPerfMetricLimits> &limits,
std::map<OSDPerfMetricQuery, OSDPerfMetricReport> *reports) {
for (auto &it : data) {
auto &query = it.first;
auto limit_it = limits.find(query);
if (limit_it == limits.end()) {
continue;
}
auto &query_limits = limit_it->second;
auto &counters = it.second;
auto &report = (*reports)[query];
query.get_performance_counter_descriptors(
&report.performance_counter_descriptors);
auto &descriptors = report.performance_counter_descriptors;
ceph_assert(descriptors.size() > 0);
if (!is_limited(query_limits, counters.size())) {
for (auto &it_counters : counters) {
auto &bl = report.group_packed_performance_counters[it_counters.first];
query.pack_counters(it_counters.second, &bl);
}
continue;
}
for (auto &limit : query_limits) {
size_t index = 0;
for (; index < descriptors.size(); index++) {
if (descriptors[index] == limit.order_by) {
break;
}
}
if (index == descriptors.size()) {
// should not happen
continue;
}
// Weighted Random Sampling (Algorithm A-Chao):
// Select the first [0, max_count) samples, randomly replace
// with samples from [max_count, end) using weighted
// probability, and return [0, max_count) as the result.
ceph_assert(limit.max_count < counters.size());
typedef std::map<OSDPerfMetricKey, PerformanceCounters>::iterator
Iterator;
std::vector<Iterator> counter_iterators;
counter_iterators.reserve(limit.max_count);
Iterator it_counters = counters.begin();
uint64_t wsum = 0;
for (size_t i = 0; i < limit.max_count; i++) {
wsum += it_counters->second[index].first;
counter_iterators.push_back(it_counters++);
}
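        // A-Chao replacement step: after folding item i's weight w_i into the
        // running total wsum, item i displaces a uniformly chosen reservoir
        // slot with probability ~ w_i / wsum, keeping each key's chance of
        // staying in the sample roughly proportional to its weight.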
for (; it_counters != counters.end(); it_counters++) {
wsum += it_counters->second[index].first;
if (ceph::util::generate_random_number(0, wsum) <=
it_counters->second[index].first) {
auto i = ceph::util::generate_random_number(0, limit.max_count - 1);
counter_iterators[i] = it_counters;
}
}
for (auto it_counters : counter_iterators) {
auto &bl =
report.group_packed_performance_counters[it_counters->first];
if (bl.length() == 0) {
query.pack_counters(it_counters->second, &bl);
}
}
}
}
}
private:
static bool is_limited(const OSDPerfMetricLimits &limits,
size_t counters_size) {
if (limits.empty()) {
return false;
}
for (auto &limit : limits) {
if (limit.max_count >= counters_size) {
return false;
}
}
return true;
}
std::map<OSDPerfMetricQuery,
std::map<OSDPerfMetricKey, PerformanceCounters>> data;
};
#endif // DYNAMIC_PERF_STATS_H
// ==== File: ceph-main/src/osd/ECBackend.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef ECBACKEND_H
#define ECBACKEND_H
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/list.hpp>
#include "OSD.h"
#include "PGBackend.h"
#include "erasure-code/ErasureCodeInterface.h"
#include "ECUtil.h"
#include "ECTransaction.h"
#include "ExtentCache.h"
//forward declaration
struct ECSubWrite;
struct ECSubWriteReply;
struct ECSubRead;
struct ECSubReadReply;
struct RecoveryMessages;
class ECBackend : public PGBackend {
public:
RecoveryHandle *open_recovery_op() override;
void run_recovery_op(
RecoveryHandle *h,
int priority
) override;
int recover_object(
const hobject_t &hoid,
eversion_t v,
ObjectContextRef head,
ObjectContextRef obc,
RecoveryHandle *h
) override;
bool _handle_message(
OpRequestRef op
) override;
bool can_handle_while_inactive(
OpRequestRef op
) override;
friend struct SubWriteApplied;
friend struct SubWriteCommitted;
void sub_write_committed(
ceph_tid_t tid,
eversion_t version,
eversion_t last_complete,
const ZTracer::Trace &trace);
void handle_sub_write(
pg_shard_t from,
OpRequestRef msg,
ECSubWrite &op,
const ZTracer::Trace &trace
);
void handle_sub_read(
pg_shard_t from,
const ECSubRead &op,
ECSubReadReply *reply,
const ZTracer::Trace &trace
);
void handle_sub_write_reply(
pg_shard_t from,
const ECSubWriteReply &op,
const ZTracer::Trace &trace
);
void handle_sub_read_reply(
pg_shard_t from,
ECSubReadReply &op,
RecoveryMessages *m,
const ZTracer::Trace &trace
);
/// @see ReadOp below
void check_recovery_sources(const OSDMapRef& osdmap) override;
void on_change() override;
void clear_recovery_state() override;
void dump_recovery_info(ceph::Formatter *f) const override;
void call_write_ordered(std::function<void(void)> &&cb) override;
void submit_transaction(
const hobject_t &hoid,
const object_stat_sum_t &delta_stats,
const eversion_t &at_version,
PGTransactionUPtr &&t,
const eversion_t &trim_to,
const eversion_t &min_last_complete_ondisk,
std::vector<pg_log_entry_t>&& log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
Context *on_all_commit,
ceph_tid_t tid,
osd_reqid_t reqid,
OpRequestRef op
) override;
int objects_read_sync(
const hobject_t &hoid,
uint64_t off,
uint64_t len,
uint32_t op_flags,
ceph::buffer::list *bl) override;
/**
* Async read mechanism
*
* Async reads use the same async read mechanism as does recovery.
* CallClientContexts is responsible for reconstructing the response
* buffer as well as for calling the callbacks.
*
* One tricky bit is that two reads may possibly not read from the same
* std::set of replicas. This could result in two reads completing in the
* wrong (from the interface user's point of view) order. Thus, we
* maintain a queue of in progress reads (@see in_progress_client_reads)
* to ensure that we always call the completion callback in order.
*
* Another subtly is that while we may read a degraded object, we will
* still only perform a client read from shards in the acting std::set. This
* ensures that we won't ever have to restart a client initiated read in
* check_recovery_sources.
*/
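  /*
   * Sketch of the ordering rule described above (an assumed shape; the real
   * logic lives in ECBackend.cc): each client read appends a
   * ClientAsyncReadStatus in submission order, and completions are only
   * delivered from the front of that queue.
   *
   *   // on submission
   *   in_progress_client_reads.emplace_back(reads.size(), std::move(func));
   *
   *   // as each object's shards arrive and are decoded
   *   status->complete_object(hoid, r, std::move(decoded_extents));
   *   kick_reads();   // runs callbacks strictly in submission order
   */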
void objects_read_and_reconstruct(
const std::map<hobject_t, std::list<boost::tuple<uint64_t, uint64_t, uint32_t> >
> &reads,
bool fast_read,
GenContextURef<std::map<hobject_t,std::pair<int, extent_map> > &&> &&func);
friend struct CallClientContexts;
struct ClientAsyncReadStatus {
unsigned objects_to_read;
GenContextURef<std::map<hobject_t,std::pair<int, extent_map> > &&> func;
std::map<hobject_t,std::pair<int, extent_map> > results;
explicit ClientAsyncReadStatus(
unsigned objects_to_read,
GenContextURef<std::map<hobject_t,std::pair<int, extent_map> > &&> &&func)
: objects_to_read(objects_to_read), func(std::move(func)) {}
void complete_object(
const hobject_t &hoid,
int err,
extent_map &&buffers) {
ceph_assert(objects_to_read);
--objects_to_read;
ceph_assert(!results.count(hoid));
results.emplace(hoid, std::make_pair(err, std::move(buffers)));
}
bool is_complete() const {
return objects_to_read == 0;
}
void run() {
func.release()->complete(std::move(results));
}
};
std::list<ClientAsyncReadStatus> in_progress_client_reads;
void objects_read_async(
const hobject_t &hoid,
const std::list<std::pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
std::pair<ceph::buffer::list*, Context*> > > &to_read,
Context *on_complete,
bool fast_read = false) override;
template <typename Func>
void objects_read_async_no_cache(
const std::map<hobject_t,extent_set> &to_read,
Func &&on_complete) {
std::map<hobject_t,std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > > _to_read;
for (auto &&hpair: to_read) {
auto &l = _to_read[hpair.first];
for (auto extent: hpair.second) {
l.emplace_back(extent.first, extent.second, 0);
}
}
objects_read_and_reconstruct(
_to_read,
false,
make_gen_lambda_context<
std::map<hobject_t,std::pair<int, extent_map> > &&, Func>(
std::forward<Func>(on_complete)));
}
void kick_reads() {
while (in_progress_client_reads.size() &&
in_progress_client_reads.front().is_complete()) {
in_progress_client_reads.front().run();
in_progress_client_reads.pop_front();
}
}
private:
friend struct ECRecoveryHandle;
uint64_t get_recovery_chunk_size() const {
return round_up_to(cct->_conf->osd_recovery_max_chunk,
sinfo.get_stripe_width());
}
void get_want_to_read_shards(std::set<int> *want_to_read) const {
const std::vector<int> &chunk_mapping = ec_impl->get_chunk_mapping();
for (int i = 0; i < (int)ec_impl->get_data_chunk_count(); ++i) {
int chunk = (int)chunk_mapping.size() > i ? chunk_mapping[i] : i;
want_to_read->insert(chunk);
}
}
/**
* Recovery
*
* Recovery uses the same underlying read mechanism as client reads
* with the slight difference that recovery reads may come from non
* acting shards. Thus, check_recovery_sources may wind up calling
* cancel_pull for a read originating with RecoveryOp.
*
* The recovery process is expressed as a state machine:
* - IDLE: Nothing is currently in progress, reads will be started and
* we will transition to READING
* - READING: We are awaiting a pending read op. Once complete, we will
* decode the buffers and proceed to WRITING
* - WRITING: We are awaiting a completed push. Once complete, we will
* either transition to COMPLETE or to IDLE to continue.
* - COMPLETE: complete
*
* We use the existing Push and PushReply messages and structures to
* handle actually shuffling the data over to the replicas. recovery_info
* and recovery_progress are expressed in terms of the logical offset
* space except for data_included which is in terms of the chunked object
* space (to match the passed buffer).
*
* xattrs are requested on the first read and used to initialize the
* object_context if missing on completion of the first read.
*
* In order to batch up reads and writes, we batch Push, PushReply,
* Transaction, and reads in a RecoveryMessages object which is passed
* among the recovery methods.
*/
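  /*
   * The state machine above, drawn out (continue_recovery_op() in
   * ECBackend.cc is the real driver; this is only a summary):
   *
   *   IDLE    --issue shard reads------------------> READING
   *   READING --decode buffers, queue pushes-------> WRITING
   *   WRITING --all push replies received----------> COMPLETE, or back to
   *                                                   IDLE for the next chunk
   */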
struct RecoveryOp {
hobject_t hoid;
eversion_t v;
std::set<pg_shard_t> missing_on;
std::set<shard_id_t> missing_on_shards;
ObjectRecoveryInfo recovery_info;
ObjectRecoveryProgress recovery_progress;
enum state_t { IDLE, READING, WRITING, COMPLETE } state;
static const char* tostr(state_t state) {
switch (state) {
case ECBackend::RecoveryOp::IDLE:
return "IDLE";
case ECBackend::RecoveryOp::READING:
return "READING";
case ECBackend::RecoveryOp::WRITING:
return "WRITING";
case ECBackend::RecoveryOp::COMPLETE:
return "COMPLETE";
default:
ceph_abort();
return "";
}
}
// must be filled if state == WRITING
std::map<int, ceph::buffer::list> returned_data;
std::map<std::string, ceph::buffer::list, std::less<>> xattrs;
ECUtil::HashInfoRef hinfo;
ObjectContextRef obc;
std::set<pg_shard_t> waiting_on_pushes;
// valid in state READING
std::pair<uint64_t, uint64_t> extent_requested;
void dump(ceph::Formatter *f) const;
RecoveryOp() : state(IDLE) {}
};
friend std::ostream &operator<<(std::ostream &lhs, const RecoveryOp &rhs);
std::map<hobject_t, RecoveryOp> recovery_ops;
void continue_recovery_op(
RecoveryOp &op,
RecoveryMessages *m);
void dispatch_recovery_messages(RecoveryMessages &m, int priority);
friend struct OnRecoveryReadComplete;
void handle_recovery_read_complete(
const hobject_t &hoid,
boost::tuple<uint64_t, uint64_t, std::map<pg_shard_t, ceph::buffer::list> > &to_read,
std::optional<std::map<std::string, ceph::buffer::list, std::less<>> > attrs,
RecoveryMessages *m);
void handle_recovery_push(
const PushOp &op,
RecoveryMessages *m,
bool is_repair);
void handle_recovery_push_reply(
const PushReplyOp &op,
pg_shard_t from,
RecoveryMessages *m);
void get_all_avail_shards(
const hobject_t &hoid,
const std::set<pg_shard_t> &error_shards,
std::set<int> &have,
std::map<shard_id_t, pg_shard_t> &shards,
bool for_recovery);
public:
/**
* Low level async read mechanism
*
* To avoid duplicating the logic for requesting and waiting for
* multiple object shards, there is a common async read mechanism
* taking a std::map of hobject_t->read_request_t which defines callbacks
* taking read_result_ts as arguments.
*
* tid_to_read_map gives open read ops. check_recovery_sources uses
* shard_to_read_map and ReadOp::source_to_obj to restart reads
* involving down osds.
*
* The user is responsible for specifying replicas on which to read
* and for reassembling the buffer on the other side since client
* reads require the original object buffer while recovery only needs
* the missing pieces.
*
* Rather than handling reads on the primary directly, we simply send
* ourselves a message. This avoids a dedicated primary path for that
* part.
*/
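  // Rough usage sketch (illustrative only; hoid, to_read, need, cb and
  // priority below are placeholders, not part of this interface). A
  // caller builds one read_request_t per object and hands the batch to
  // start_read_op(), declared further down; the GenContext cb fires with
  // the read_result_t when the read completes:
  //
  //   std::map<hobject_t, std::set<int>> want;   // chunks wanted per object
  //   std::map<hobject_t, read_request_t> reads;
  //   reads.emplace(hoid, read_request_t(to_read, need, false, cb));
  //   start_read_op(priority, want, reads, OpRequestRef(),
  //                 false,    // do_redundant_reads
  //                 false);   // for_recovery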
struct read_result_t {
int r;
std::map<pg_shard_t, int> errors;
std::optional<std::map<std::string, ceph::buffer::list, std::less<>> > attrs;
std::list<
boost::tuple<
uint64_t, uint64_t, std::map<pg_shard_t, ceph::buffer::list> > > returned;
read_result_t() : r(0) {}
};
struct read_request_t {
const std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > to_read;
std::map<pg_shard_t, std::vector<std::pair<int, int>>> need;
bool want_attrs;
GenContext<std::pair<RecoveryMessages *, read_result_t& > &> *cb;
read_request_t(
const std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > &to_read,
const std::map<pg_shard_t, std::vector<std::pair<int, int>>> &need,
bool want_attrs,
GenContext<std::pair<RecoveryMessages *, read_result_t& > &> *cb)
: to_read(to_read), need(need), want_attrs(want_attrs),
cb(cb) {}
};
friend ostream &operator<<(ostream &lhs, const read_request_t &rhs);
struct ReadOp {
int priority;
ceph_tid_t tid;
OpRequestRef op; // may be null if not on behalf of a client
    // True if redundant reads are issued, false otherwise;
    // this is useful to trade off some resources (redundant ops) for
    // low-latency reads, especially on a relatively idle cluster
bool do_redundant_reads;
    // True if reading for recovery, which may read only a subset
    // of the available shards.
bool for_recovery;
ZTracer::Trace trace;
std::map<hobject_t, std::set<int>> want_to_read;
std::map<hobject_t, read_request_t> to_read;
std::map<hobject_t, read_result_t> complete;
std::map<hobject_t, std::set<pg_shard_t>> obj_to_source;
std::map<pg_shard_t, std::set<hobject_t> > source_to_obj;
void dump(ceph::Formatter *f) const;
std::set<pg_shard_t> in_progress;
ReadOp(
int priority,
ceph_tid_t tid,
bool do_redundant_reads,
bool for_recovery,
OpRequestRef op,
std::map<hobject_t, std::set<int>> &&_want_to_read,
std::map<hobject_t, read_request_t> &&_to_read)
: priority(priority), tid(tid), op(op), do_redundant_reads(do_redundant_reads),
for_recovery(for_recovery), want_to_read(std::move(_want_to_read)),
to_read(std::move(_to_read)) {
for (auto &&hpair: to_read) {
auto &returned = complete[hpair.first].returned;
for (auto &&extent: hpair.second.to_read) {
returned.push_back(
boost::make_tuple(
extent.get<0>(),
extent.get<1>(),
std::map<pg_shard_t, ceph::buffer::list>()));
}
}
}
ReadOp() = delete;
ReadOp(const ReadOp &) = default;
ReadOp(ReadOp &&) = default;
};
friend struct FinishReadOp;
void filter_read_op(
const OSDMapRef& osdmap,
ReadOp &op);
void complete_read_op(ReadOp &rop, RecoveryMessages *m);
friend ostream &operator<<(ostream &lhs, const ReadOp &rhs);
std::map<ceph_tid_t, ReadOp> tid_to_read_map;
std::map<pg_shard_t, std::set<ceph_tid_t> > shard_to_read_map;
void start_read_op(
int priority,
std::map<hobject_t, std::set<int>> &want_to_read,
std::map<hobject_t, read_request_t> &to_read,
OpRequestRef op,
bool do_redundant_reads, bool for_recovery);
void do_read_op(ReadOp &rop);
int send_all_remaining_reads(
const hobject_t &hoid,
ReadOp &rop);
/**
* Client writes
*
* ECTransaction is responsible for generating a transaction for
* each shard to which we need to send the write. As required
* by the PGBackend interface, the ECBackend write mechanism
* passes trim information with the write and last_complete back
* with the reply.
*
* As with client reads, there is a possibility of out-of-order
* completions. Thus, callbacks and completion are called in order
   * on the writing list.
*/
struct Op : boost::intrusive::list_base_hook<> {
/// From submit_transaction caller, describes operation
hobject_t hoid;
object_stat_sum_t delta_stats;
eversion_t version;
eversion_t trim_to;
std::optional<pg_hit_set_history_t> updated_hit_set_history;
std::vector<pg_log_entry_t> log_entries;
ceph_tid_t tid;
osd_reqid_t reqid;
ZTracer::Trace trace;
eversion_t roll_forward_to; /// Soon to be generated internally
    /// Ancillary state also provided by the submit_transaction caller
std::map<hobject_t, ObjectContextRef> obc_map;
/// see call_write_ordered
std::list<std::function<void(void)> > on_write;
/// Generated internally
std::set<hobject_t> temp_added;
std::set<hobject_t> temp_cleared;
ECTransaction::WritePlan plan;
bool requires_rmw() const { return !plan.to_read.empty(); }
bool invalidates_cache() const { return plan.invalidates_cache; }
// must be true if requires_rmw(), must be false if invalidates_cache()
bool using_cache = true;
/// In progress read state;
std::map<hobject_t,extent_set> pending_read; // subset already being read
std::map<hobject_t,extent_set> remote_read; // subset we must read
std::map<hobject_t,extent_map> remote_read_result;
bool read_in_progress() const {
return !remote_read.empty() && remote_read_result.empty();
}
/// In progress write state.
std::set<pg_shard_t> pending_commit;
// we need pending_apply for pre-mimic peers so that we don't issue a
// read on a remote shard before it has applied a previous write. We can
// remove this after nautilus.
std::set<pg_shard_t> pending_apply;
bool write_in_progress() const {
return !pending_commit.empty() || !pending_apply.empty();
}
/// optional, may be null, for tracking purposes
OpRequestRef client_op;
/// pin for cache
ExtentCache::write_pin pin;
/// Callbacks
Context *on_all_commit = nullptr;
~Op() {
delete on_all_commit;
}
};
using op_list = boost::intrusive::list<Op>;
friend ostream &operator<<(ostream &lhs, const Op &rhs);
ExtentCache cache;
std::map<ceph_tid_t, Op> tid_to_op_map; /// Owns Op structure
/**
   * We model the possible rmw states as a set of waitlists.
* All writes at this time complete in order, so a write blocked
* at waiting_state blocks all writes behind it as well (same for
* other states).
*
* Future work: We can break this up into a per-object pipeline
* (almost). First, provide an ordering token to submit_transaction
* and require that all operations within a single transaction take
* place on a subset of hobject_t space partitioned by that token
   * (the hashid seems about right to me -- even works for temp objects
   * if you recall that a temp object created for object head foo will
   * only ever be referenced by other transactions on foo and isn't
   * reused). Next, factor this part into a class and maintain one per
* ordering token. Next, fixup PrimaryLogPG's repop queue to be
* partitioned by ordering token. Finally, refactor the op pipeline
* so that the log entries passed into submit_transaction aren't
* versioned. We can't assign versions to them until we actually
* submit the operation. That's probably going to be the hard part.
*/
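  /*
   * Condensed view of that pipeline (added here for clarity; not part of
   * the original header). Ops move, in order, across the waitlists
   * declared below, driven by the try_* helpers:
   *
   *   waiting_state  --(try_state_to_reads)-->  waiting_reads
   *   waiting_reads  --(try_reads_to_commit)--> waiting_commit
   *   waiting_commit --(try_finish_rmw)-->      done
   */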
class pipeline_state_t {
enum {
CACHE_VALID = 0,
CACHE_INVALID = 1
} pipeline_state = CACHE_VALID;
public:
bool caching_enabled() const {
return pipeline_state == CACHE_VALID;
}
bool cache_invalid() const {
return !caching_enabled();
}
void invalidate() {
pipeline_state = CACHE_INVALID;
}
void clear() {
pipeline_state = CACHE_VALID;
}
friend ostream &operator<<(ostream &lhs, const pipeline_state_t &rhs);
} pipeline_state;
op_list waiting_state; /// writes waiting on pipe_state
op_list waiting_reads; /// writes waiting on partial stripe reads
op_list waiting_commit; /// writes waiting on initial commit
eversion_t completed_to;
eversion_t committed_to;
void start_rmw(Op *op, PGTransactionUPtr &&t);
bool try_state_to_reads();
bool try_reads_to_commit();
bool try_finish_rmw();
void check_ops();
ceph::ErasureCodeInterfaceRef ec_impl;
/**
* ECRecPred
*
   * Determines whether _have is sufficient to recover an object
*/
class ECRecPred : public IsPGRecoverablePredicate {
std::set<int> want;
ceph::ErasureCodeInterfaceRef ec_impl;
public:
explicit ECRecPred(ceph::ErasureCodeInterfaceRef ec_impl) : ec_impl(ec_impl) {
for (unsigned i = 0; i < ec_impl->get_chunk_count(); ++i) {
want.insert(i);
}
}
bool operator()(const std::set<pg_shard_t> &_have) const override {
std::set<int> have;
for (std::set<pg_shard_t>::const_iterator i = _have.begin();
i != _have.end();
++i) {
have.insert(i->shard);
}
std::map<int, std::vector<std::pair<int, int>>> min;
return ec_impl->minimum_to_decode(want, have, &min) == 0;
}
};
IsPGRecoverablePredicate *get_is_recoverable_predicate() const override {
return new ECRecPred(ec_impl);
}
int get_ec_data_chunk_count() const override {
return ec_impl->get_data_chunk_count();
}
int get_ec_stripe_chunk_size() const override {
return sinfo.get_chunk_size();
}
/**
* ECReadPred
*
   * Determines whether _have is sufficient to read an object
*/
class ECReadPred : public IsPGReadablePredicate {
pg_shard_t whoami;
ECRecPred rec_pred;
public:
ECReadPred(
pg_shard_t whoami,
ceph::ErasureCodeInterfaceRef ec_impl) : whoami(whoami), rec_pred(ec_impl) {}
bool operator()(const std::set<pg_shard_t> &_have) const override {
return _have.count(whoami) && rec_pred(_have);
}
};
IsPGReadablePredicate *get_is_readable_predicate() const override {
return new ECReadPred(get_parent()->whoami_shard(), ec_impl);
}
const ECUtil::stripe_info_t sinfo;
/// If modified, ensure that the ref is held until the update is applied
SharedPtrRegistry<hobject_t, ECUtil::HashInfo> unstable_hashinfo_registry;
ECUtil::HashInfoRef get_hash_info(const hobject_t &hoid, bool create = false,
const std::map<std::string, ceph::buffer::ptr, std::less<>> *attr = NULL);
public:
ECBackend(
PGBackend::Listener *pg,
const coll_t &coll,
ObjectStore::CollectionHandle &ch,
ObjectStore *store,
CephContext *cct,
ceph::ErasureCodeInterfaceRef ec_impl,
uint64_t stripe_width);
/// Returns to_read replicas sufficient to reconstruct want
int get_min_avail_to_read_shards(
const hobject_t &hoid, ///< [in] object
const std::set<int> &want, ///< [in] desired shards
bool for_recovery, ///< [in] true if we may use non-acting replicas
bool do_redundant_reads, ///< [in] true if we want to issue redundant reads to reduce latency
std::map<pg_shard_t, std::vector<std::pair<int, int>>> *to_read ///< [out] shards, corresponding subchunks to read
); ///< @return error code, 0 on success
int get_remaining_shards(
const hobject_t &hoid,
const std::set<int> &avail,
const std::set<int> &want,
const read_result_t &result,
std::map<pg_shard_t, std::vector<std::pair<int, int>>> *to_read,
bool for_recovery);
int objects_get_attrs(
const hobject_t &hoid,
std::map<std::string, ceph::buffer::list, std::less<>> *out) override;
void rollback_append(
const hobject_t &hoid,
uint64_t old_size,
ObjectStore::Transaction *t) override;
bool auto_repair_supported() const override { return true; }
int be_deep_scrub(
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
ScrubMap::object &o) override;
uint64_t be_get_ondisk_size(uint64_t logical_size) const final {
return sinfo.logical_to_next_chunk_offset(logical_size);
}
void _failed_push(const hobject_t &hoid,
std::pair<RecoveryMessages *, ECBackend::read_result_t &> &in);
};
ostream &operator<<(ostream &lhs, const ECBackend::pipeline_state_t &rhs);
#endif
| 22,919 | 32.313953 | 120 | h |
null | ceph-main/src/osd/ECMsgTypes.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef ECBMSGTYPES_H
#define ECBMSGTYPES_H
#include "osd_types.h"
#include "include/buffer.h"
#include "os/ObjectStore.h"
#include "boost/tuple/tuple.hpp"
struct ECSubWrite {
pg_shard_t from;
ceph_tid_t tid;
osd_reqid_t reqid;
hobject_t soid;
pg_stat_t stats;
ObjectStore::Transaction t;
eversion_t at_version;
eversion_t trim_to;
eversion_t roll_forward_to;
std::vector<pg_log_entry_t> log_entries;
std::set<hobject_t> temp_added;
std::set<hobject_t> temp_removed;
std::optional<pg_hit_set_history_t> updated_hit_set_history;
bool backfill_or_async_recovery = false;
ECSubWrite() : tid(0) {}
ECSubWrite(
pg_shard_t from,
ceph_tid_t tid,
osd_reqid_t reqid,
hobject_t soid,
const pg_stat_t &stats,
const ObjectStore::Transaction &t,
eversion_t at_version,
eversion_t trim_to,
eversion_t roll_forward_to,
std::vector<pg_log_entry_t> log_entries,
std::optional<pg_hit_set_history_t> updated_hit_set_history,
const std::set<hobject_t> &temp_added,
const std::set<hobject_t> &temp_removed,
bool backfill_or_async_recovery)
: from(from), tid(tid), reqid(reqid),
soid(soid), stats(stats), t(t),
at_version(at_version),
trim_to(trim_to), roll_forward_to(roll_forward_to),
log_entries(log_entries),
temp_added(temp_added),
temp_removed(temp_removed),
updated_hit_set_history(updated_hit_set_history),
backfill_or_async_recovery(backfill_or_async_recovery)
{}
void claim(ECSubWrite &other) {
from = other.from;
tid = other.tid;
reqid = other.reqid;
soid = other.soid;
stats = other.stats;
t.swap(other.t);
at_version = other.at_version;
trim_to = other.trim_to;
roll_forward_to = other.roll_forward_to;
log_entries.swap(other.log_entries);
temp_added.swap(other.temp_added);
temp_removed.swap(other.temp_removed);
updated_hit_set_history = other.updated_hit_set_history;
backfill_or_async_recovery = other.backfill_or_async_recovery;
}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<ECSubWrite*>& o);
private:
// no outside copying -- slow
ECSubWrite(ECSubWrite& other);
const ECSubWrite& operator=(const ECSubWrite& other);
};
WRITE_CLASS_ENCODER(ECSubWrite)
struct ECSubWriteReply {
pg_shard_t from;
ceph_tid_t tid;
eversion_t last_complete;
bool committed;
bool applied;
ECSubWriteReply() : tid(0), committed(false), applied(false) {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<ECSubWriteReply*>& o);
};
WRITE_CLASS_ENCODER(ECSubWriteReply)
struct ECSubRead {
pg_shard_t from;
ceph_tid_t tid;
std::map<hobject_t, std::list<boost::tuple<uint64_t, uint64_t, uint32_t> >> to_read;
std::set<hobject_t> attrs_to_read;
std::map<hobject_t, std::vector<std::pair<int, int>>> subchunks;
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<ECSubRead*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(ECSubRead)
struct ECSubReadReply {
pg_shard_t from;
ceph_tid_t tid;
std::map<hobject_t, std::list<std::pair<uint64_t, ceph::buffer::list> >> buffers_read;
std::map<hobject_t, std::map<std::string, ceph::buffer::list, std::less<>>> attrs_read;
std::map<hobject_t, int> errors;
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<ECSubReadReply*>& o);
};
WRITE_CLASS_ENCODER(ECSubReadReply)
std::ostream &operator<<(
std::ostream &lhs, const ECSubWrite &rhs);
std::ostream &operator<<(
std::ostream &lhs, const ECSubWriteReply &rhs);
std::ostream &operator<<(
std::ostream &lhs, const ECSubRead &rhs);
std::ostream &operator<<(
std::ostream &lhs, const ECSubReadReply &rhs);
#endif
| 4,596 | 31.602837 | 89 | h |
null | ceph-main/src/osd/ECTransaction.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef ECTRANSACTION_H
#define ECTRANSACTION_H
#include "OSD.h"
#include "PGBackend.h"
#include "ECUtil.h"
#include "erasure-code/ErasureCodeInterface.h"
#include "PGTransaction.h"
#include "ExtentCache.h"
namespace ECTransaction {
struct WritePlan {
PGTransactionUPtr t;
bool invalidates_cache = false; // Yes, both are possible
std::map<hobject_t,extent_set> to_read;
std::map<hobject_t,extent_set> will_write; // superset of to_read
std::map<hobject_t,ECUtil::HashInfoRef> hash_infos;
};
bool requires_overwrite(
uint64_t prev_size,
const PGTransaction::ObjectOperation &op);
template <typename F>
WritePlan get_write_plan(
const ECUtil::stripe_info_t &sinfo,
PGTransactionUPtr &&t,
F &&get_hinfo,
DoutPrefixProvider *dpp) {
WritePlan plan;
t->safe_create_traverse(
[&](std::pair<const hobject_t, PGTransaction::ObjectOperation> &i) {
ECUtil::HashInfoRef hinfo = get_hinfo(i.first);
plan.hash_infos[i.first] = hinfo;
uint64_t projected_size =
hinfo->get_projected_total_logical_size(sinfo);
if (i.second.deletes_first()) {
ldpp_dout(dpp, 20) << __func__ << ": delete, setting projected size"
<< " to 0" << dendl;
projected_size = 0;
}
hobject_t source;
if (i.second.has_source(&source)) {
plan.invalidates_cache = true;
ECUtil::HashInfoRef shinfo = get_hinfo(source);
projected_size = shinfo->get_projected_total_logical_size(sinfo);
plan.hash_infos[source] = shinfo;
}
auto &will_write = plan.will_write[i.first];
if (i.second.truncate &&
i.second.truncate->first < projected_size) {
if (!(sinfo.logical_offset_is_stripe_aligned(
i.second.truncate->first))) {
plan.to_read[i.first].union_insert(
sinfo.logical_to_prev_stripe_offset(i.second.truncate->first),
sinfo.get_stripe_width());
ldpp_dout(dpp, 20) << __func__ << ": unaligned truncate" << dendl;
will_write.union_insert(
sinfo.logical_to_prev_stripe_offset(i.second.truncate->first),
sinfo.get_stripe_width());
}
projected_size = sinfo.logical_to_next_stripe_offset(
i.second.truncate->first);
}
extent_set raw_write_set;
for (auto &&extent: i.second.buffer_updates) {
using BufferUpdate = PGTransaction::ObjectOperation::BufferUpdate;
if (boost::get<BufferUpdate::CloneRange>(&(extent.get_val()))) {
ceph_assert(
0 ==
"CloneRange is not allowed, do_op should have returned ENOTSUPP");
}
raw_write_set.insert(extent.get_off(), extent.get_len());
}
auto orig_size = projected_size;
for (auto extent = raw_write_set.begin();
extent != raw_write_set.end();
++extent) {
uint64_t head_start =
sinfo.logical_to_prev_stripe_offset(extent.get_start());
uint64_t head_finish =
sinfo.logical_to_next_stripe_offset(extent.get_start());
if (head_start > projected_size) {
head_start = projected_size;
}
if (head_start != head_finish &&
head_start < orig_size) {
ceph_assert(head_finish <= orig_size);
ceph_assert(head_finish - head_start == sinfo.get_stripe_width());
ldpp_dout(dpp, 20) << __func__ << ": reading partial head stripe "
<< head_start << "~" << sinfo.get_stripe_width()
<< dendl;
plan.to_read[i.first].union_insert(
head_start, sinfo.get_stripe_width());
}
uint64_t tail_start =
sinfo.logical_to_prev_stripe_offset(
extent.get_start() + extent.get_len());
uint64_t tail_finish =
sinfo.logical_to_next_stripe_offset(
extent.get_start() + extent.get_len());
if (tail_start != tail_finish &&
(head_start == head_finish || tail_start != head_start) &&
tail_start < orig_size) {
ceph_assert(tail_finish <= orig_size);
ceph_assert(tail_finish - tail_start == sinfo.get_stripe_width());
ldpp_dout(dpp, 20) << __func__ << ": reading partial tail stripe "
<< tail_start << "~" << sinfo.get_stripe_width()
<< dendl;
plan.to_read[i.first].union_insert(
tail_start, sinfo.get_stripe_width());
}
if (head_start != tail_finish) {
ceph_assert(
sinfo.logical_offset_is_stripe_aligned(
tail_finish - head_start)
);
will_write.union_insert(
head_start, tail_finish - head_start);
if (tail_finish > projected_size)
projected_size = tail_finish;
} else {
ceph_assert(tail_finish <= projected_size);
}
}
if (i.second.truncate &&
i.second.truncate->second > projected_size) {
uint64_t truncating_to =
sinfo.logical_to_next_stripe_offset(i.second.truncate->second);
ldpp_dout(dpp, 20) << __func__ << ": truncating out to "
<< truncating_to
<< dendl;
will_write.union_insert(projected_size,
truncating_to - projected_size);
projected_size = truncating_to;
}
ldpp_dout(dpp, 20) << __func__ << ": " << i.first
<< " projected size "
<< projected_size
<< dendl;
hinfo->set_projected_total_logical_size(
sinfo,
projected_size);
/* validate post conditions:
* to_read should have an entry for i.first iff it isn't empty
* and if we are reading from i.first, we can't be renaming or
* cloning it */
ceph_assert(plan.to_read.count(i.first) == 0 ||
(!plan.to_read.at(i.first).empty() &&
!i.second.has_source()));
});
plan.t = std::move(t);
return plan;
}
void generate_transactions(
WritePlan &plan,
ceph::ErasureCodeInterfaceRef &ecimpl,
pg_t pgid,
const ECUtil::stripe_info_t &sinfo,
const std::map<hobject_t,extent_map> &partial_extents,
std::vector<pg_log_entry_t> &entries,
std::map<hobject_t,extent_map> *written,
std::map<shard_id_t, ObjectStore::Transaction> *transactions,
std::set<hobject_t> *temp_added,
std::set<hobject_t> *temp_removed,
DoutPrefixProvider *dpp,
const ceph_release_t require_osd_release = ceph_release_t::unknown);
};
#endif
| 6,342 | 30.557214 | 74 | h |
null | ceph-main/src/osd/ECUtil.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef ECUTIL_H
#define ECUTIL_H
#include <ostream>
#include "erasure-code/ErasureCodeInterface.h"
#include "include/buffer_fwd.h"
#include "include/ceph_assert.h"
#include "include/encoding.h"
#include "common/Formatter.h"
namespace ECUtil {
class stripe_info_t {
const uint64_t stripe_width;
const uint64_t chunk_size;
public:
stripe_info_t(uint64_t stripe_size, uint64_t stripe_width)
: stripe_width(stripe_width),
chunk_size(stripe_width / stripe_size) {
ceph_assert(stripe_width % stripe_size == 0);
}
bool logical_offset_is_stripe_aligned(uint64_t logical) const {
return (logical % stripe_width) == 0;
}
uint64_t get_stripe_width() const {
return stripe_width;
}
uint64_t get_chunk_size() const {
return chunk_size;
}
uint64_t logical_to_prev_chunk_offset(uint64_t offset) const {
return (offset / stripe_width) * chunk_size;
}
uint64_t logical_to_next_chunk_offset(uint64_t offset) const {
return ((offset + stripe_width - 1)/ stripe_width) * chunk_size;
}
uint64_t logical_to_prev_stripe_offset(uint64_t offset) const {
return offset - (offset % stripe_width);
}
uint64_t logical_to_next_stripe_offset(uint64_t offset) const {
return ((offset % stripe_width) ?
(offset - (offset % stripe_width) + stripe_width) :
offset);
}
uint64_t aligned_logical_offset_to_chunk_offset(uint64_t offset) const {
ceph_assert(offset % stripe_width == 0);
return (offset / stripe_width) * chunk_size;
}
uint64_t aligned_chunk_offset_to_logical_offset(uint64_t offset) const {
ceph_assert(offset % chunk_size == 0);
return (offset / chunk_size) * stripe_width;
}
std::pair<uint64_t, uint64_t> aligned_offset_len_to_chunk(
std::pair<uint64_t, uint64_t> in) const {
return std::make_pair(
aligned_logical_offset_to_chunk_offset(in.first),
aligned_logical_offset_to_chunk_offset(in.second));
}
std::pair<uint64_t, uint64_t> offset_len_to_stripe_bounds(
std::pair<uint64_t, uint64_t> in) const {
uint64_t off = logical_to_prev_stripe_offset(in.first);
uint64_t len = logical_to_next_stripe_offset(
(in.first - off) + in.second);
return std::make_pair(off, len);
}
};
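// Worked example (illustrative numbers, not from the original header):
// stripe_info_t sinfo(4, 8192) describes 4 data chunks and an 8192-byte
// stripe, so get_chunk_size() == 2048. For a logical offset of 10000:
// logical_to_prev_stripe_offset -> 8192, logical_to_next_stripe_offset
// -> 16384, logical_to_prev_chunk_offset -> 2048, and
// logical_to_next_chunk_offset -> 4096.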
int decode(
const stripe_info_t &sinfo,
ceph::ErasureCodeInterfaceRef &ec_impl,
std::map<int, ceph::buffer::list> &to_decode,
ceph::buffer::list *out);
int decode(
const stripe_info_t &sinfo,
ceph::ErasureCodeInterfaceRef &ec_impl,
std::map<int, ceph::buffer::list> &to_decode,
std::map<int, ceph::buffer::list*> &out);
int encode(
const stripe_info_t &sinfo,
ceph::ErasureCodeInterfaceRef &ec_impl,
ceph::buffer::list &in,
const std::set<int> &want,
std::map<int, ceph::buffer::list> *out);
class HashInfo {
uint64_t total_chunk_size = 0;
std::vector<uint32_t> cumulative_shard_hashes;
// purely ephemeral, represents the size once all in-flight ops commit
uint64_t projected_total_chunk_size = 0;
public:
HashInfo() {}
explicit HashInfo(unsigned num_chunks) :
cumulative_shard_hashes(num_chunks, -1) {}
void append(uint64_t old_size, std::map<int, ceph::buffer::list> &to_append);
void clear() {
total_chunk_size = 0;
cumulative_shard_hashes = std::vector<uint32_t>(
cumulative_shard_hashes.size(),
-1);
}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<HashInfo*>& o);
uint32_t get_chunk_hash(int shard) const {
ceph_assert((unsigned)shard < cumulative_shard_hashes.size());
return cumulative_shard_hashes[shard];
}
uint64_t get_total_chunk_size() const {
return total_chunk_size;
}
uint64_t get_projected_total_chunk_size() const {
return projected_total_chunk_size;
}
uint64_t get_total_logical_size(const stripe_info_t &sinfo) const {
return get_total_chunk_size() *
(sinfo.get_stripe_width()/sinfo.get_chunk_size());
}
uint64_t get_projected_total_logical_size(const stripe_info_t &sinfo) const {
return get_projected_total_chunk_size() *
(sinfo.get_stripe_width()/sinfo.get_chunk_size());
}
void set_projected_total_logical_size(
const stripe_info_t &sinfo,
uint64_t logical_size) {
ceph_assert(sinfo.logical_offset_is_stripe_aligned(logical_size));
projected_total_chunk_size = sinfo.aligned_logical_offset_to_chunk_offset(
logical_size);
}
void set_total_chunk_size_clear_hash(uint64_t new_chunk_size) {
cumulative_shard_hashes.clear();
total_chunk_size = new_chunk_size;
}
bool has_chunk_hash() const {
return !cumulative_shard_hashes.empty();
}
void update_to(const HashInfo &rhs) {
auto ptcs = projected_total_chunk_size;
*this = rhs;
projected_total_chunk_size = ptcs;
}
friend std::ostream& operator<<(std::ostream& out, const HashInfo& hi);
};
typedef std::shared_ptr<HashInfo> HashInfoRef;
bool is_hinfo_key_string(const std::string &key);
const std::string &get_hinfo_key();
WRITE_CLASS_ENCODER(ECUtil::HashInfo)
}
#endif
| 5,509 | 31.411765 | 79 | h |
null | ceph-main/src/osd/ExtentCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef EXTENT_CACHE_H
#define EXTENT_CACHE_H
#include <map>
#include <list>
#include <vector>
#include <utility>
#include <optional>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/list.hpp>
#include "include/interval_set.h"
#include "common/interval_map.h"
#include "include/buffer.h"
#include "common/hobject.h"
/**
ExtentCache
The main purpose of this cache is to ensure that we can pipeline
overlapping partial overwrites.
To that end we need to ensure that an extent pinned for an operation is
live until that operation completes. However, a particular extent
might be pinned by multiple operations (several pipelined writes
on the same object).
1) When we complete an operation, we only look at extents owned only
by that operation.
2) Per-extent overhead is fixed size.
 3) Per-operation metadata is fixed size.
This is simple enough to realize with two main structures:
- extent: contains a pointer to the pin owning it and intrusive list
pointers to other extents owned by the same pin
- pin_state: contains the list head for extents owned by it
This works as long as we only need to remember one "owner" for
each extent. To make this work, we'll need to leverage some
invariants guaranteed by higher layers:
1) Writes on a particular object must be ordered
2) A particular object will have outstanding reads or writes, but not
both (note that you can have a read while a write is committed, but
not applied).
Our strategy therefore will be to have whichever in-progress op will
finish "last" be the owner of a particular extent. For now, we won't
cache reads, so 2) simply means that we can assume that reads and
recovery operations imply no unstable extents on the object in
question.
Write: WaitRead -> WaitCommit -> Complete
Invariant 1) above actually indicates that we can't have writes
bypassing the WaitRead state while there are writes waiting on
Reads. Thus, the set of operations pinning a particular extent
 must always complete in order of arrival.
This suggests that a particular extent may be in only the following
states:
0) Empty (not in the map at all)
1) Write Pending N
- Some write with reqid <= N is currently fetching the data for
this extent
- The extent must persist until Write reqid N completes
- All ops pinning this extent are writes in the WaitRead state of
the Write pipeline (there must be an in progress write, so no
reads can be in progress).
2) Write Pinned N:
- This extent has data corresponding to some reqid M <= N
- The extent must persist until Write reqid N commits
- All ops pinning this extent are writes in some Write
state (all are possible). Reads are not possible
in this state (or the others) due to 2).
All of the above suggests that there are 3 things users can
 ask of the cache corresponding to the 3 Write pipeline
states.
*/
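/*
 * Rough usage sketch for the write path (illustrative only; oid, to_write,
 * to_read, rest and written are placeholders, and this block is not part
 * of the original comment). The three operations referred to above map
 * onto the public ExtentCache calls declared below:
 *
 *   ExtentCache cache;
 *   ExtentCache::write_pin pin;
 *   cache.open_write_pin(pin);
 *   extent_set need = cache.reserve_extents_for_rmw(oid, pin, to_write, to_read);
 *   // ...read the 'need' extents from the store yourself, then...
 *   extent_map cached = cache.get_remaining_extents_for_rmw(oid, pin, rest);
 *   // (rest == to_read minus need)
 *   // ...apply the overwrite and present the resulting buffers...
 *   cache.present_rmw_update(oid, pin, written);
 *   // ...once the write has committed everywhere...
 *   cache.release_write_pin(pin);
 */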
/// If someone wants these types, but not ExtentCache, move to another file
struct bl_split_merge {
ceph::buffer::list split(
uint64_t offset,
uint64_t length,
ceph::buffer::list &bl) const {
ceph::buffer::list out;
out.substr_of(bl, offset, length);
return out;
}
bool can_merge(const ceph::buffer::list &left, const ceph::buffer::list &right) const {
return true;
}
ceph::buffer::list merge(ceph::buffer::list &&left, ceph::buffer::list &&right) const {
ceph::buffer::list bl{std::move(left)};
bl.claim_append(right);
return bl;
}
uint64_t length(const ceph::buffer::list &b) const { return b.length(); }
};
using extent_set = interval_set<uint64_t>;
using extent_map = interval_map<uint64_t, ceph::buffer::list, bl_split_merge>;
class ExtentCache {
struct object_extent_set;
struct pin_state;
private:
struct extent {
object_extent_set *parent_extent_set = nullptr;
pin_state *parent_pin_state = nullptr;
boost::intrusive::set_member_hook<> extent_set_member;
boost::intrusive::list_member_hook<> pin_list_member;
uint64_t offset;
uint64_t length;
std::optional<ceph::buffer::list> bl;
uint64_t get_length() const {
return length;
}
bool is_pending() const {
return bl == std::nullopt;
}
bool pinned_by_write() const {
ceph_assert(parent_pin_state);
return parent_pin_state->is_write();
}
uint64_t pin_tid() const {
ceph_assert(parent_pin_state);
return parent_pin_state->tid;
}
extent(uint64_t offset, ceph::buffer::list _bl)
: offset(offset), length(_bl.length()), bl(_bl) {}
extent(uint64_t offset, uint64_t length)
: offset(offset), length(length) {}
bool operator<(const extent &rhs) const {
return offset < rhs.offset;
}
private:
// can briefly violate the two link invariant, used in unlink() and move()
void _link_pin_state(pin_state &pin_state);
void _unlink_pin_state();
public:
void unlink();
void link(object_extent_set &parent_extent_set, pin_state &pin_state);
void move(pin_state &to);
};
struct object_extent_set : boost::intrusive::set_base_hook<> {
hobject_t oid;
explicit object_extent_set(const hobject_t &oid) : oid(oid) {}
using set_member_options = boost::intrusive::member_hook<
extent,
boost::intrusive::set_member_hook<>,
&extent::extent_set_member>;
using set = boost::intrusive::set<extent, set_member_options>;
set extent_set;
bool operator<(const object_extent_set &rhs) const {
return oid < rhs.oid;
}
struct uint_cmp {
bool operator()(uint64_t lhs, const extent &rhs) const {
return lhs < rhs.offset;
}
bool operator()(const extent &lhs, uint64_t rhs) const {
return lhs.offset < rhs;
}
};
std::pair<set::iterator, set::iterator> get_containing_range(
uint64_t offset, uint64_t length);
void erase(uint64_t offset, uint64_t length);
struct update_action {
enum type {
NONE,
UPDATE_PIN
};
type action = NONE;
std::optional<ceph::buffer::list> bl;
};
template <typename F>
void traverse_update(
pin_state &pin,
uint64_t offset,
uint64_t length,
F &&f) {
auto range = get_containing_range(offset, length);
if (range.first == range.second || range.first->offset > offset) {
uint64_t extlen = range.first == range.second ?
length : range.first->offset - offset;
update_action action;
f(offset, extlen, nullptr, &action);
ceph_assert(!action.bl || action.bl->length() == extlen);
if (action.action == update_action::UPDATE_PIN) {
extent *ext = action.bl ?
new extent(offset, *action.bl) :
new extent(offset, extlen);
ext->link(*this, pin);
} else {
ceph_assert(!action.bl);
}
}
for (auto p = range.first; p != range.second;) {
extent *ext = &*p;
++p;
uint64_t extoff = std::max(ext->offset, offset);
uint64_t extlen = std::min(
ext->length - (extoff - ext->offset),
offset + length - extoff);
update_action action;
f(extoff, extlen, ext, &action);
ceph_assert(!action.bl || action.bl->length() == extlen);
extent *final_extent = nullptr;
if (action.action == update_action::NONE) {
final_extent = ext;
} else {
pin_state *ps = ext->parent_pin_state;
ext->unlink();
if ((ext->offset < offset) &&
(ext->offset + ext->get_length() > offset)) {
extent *head = nullptr;
if (ext->bl) {
ceph::buffer::list bl;
bl.substr_of(
*(ext->bl),
0,
offset - ext->offset);
head = new extent(ext->offset, bl);
} else {
head = new extent(
ext->offset, offset - ext->offset);
}
head->link(*this, *ps);
}
if ((ext->offset + ext->length > offset + length) &&
(offset + length > ext->offset)) {
uint64_t nlen =
(ext->offset + ext->get_length()) - (offset + length);
extent *tail = nullptr;
if (ext->bl) {
ceph::buffer::list bl;
bl.substr_of(
*(ext->bl),
ext->get_length() - nlen,
nlen);
tail = new extent(offset + length, bl);
} else {
tail = new extent(offset + length, nlen);
}
tail->link(*this, *ps);
}
if (action.action == update_action::UPDATE_PIN) {
if (ext->bl) {
ceph::buffer::list bl;
bl.substr_of(
*(ext->bl),
extoff - ext->offset,
extlen);
final_extent = new ExtentCache::extent(
extoff,
bl);
} else {
final_extent = new ExtentCache::extent(
extoff, extlen);
}
final_extent->link(*this, pin);
}
delete ext;
}
if (action.bl) {
ceph_assert(final_extent);
ceph_assert(final_extent->length == action.bl->length());
final_extent->bl = *(action.bl);
}
uint64_t next_off = p == range.second ?
offset + length : p->offset;
if (extoff + extlen < next_off) {
uint64_t tailoff = extoff + extlen;
uint64_t taillen = next_off - tailoff;
update_action action;
f(tailoff, taillen, nullptr, &action);
ceph_assert(!action.bl || action.bl->length() == taillen);
if (action.action == update_action::UPDATE_PIN) {
extent *ext = action.bl ?
new extent(tailoff, *action.bl) :
new extent(tailoff, taillen);
ext->link(*this, pin);
} else {
ceph_assert(!action.bl);
}
}
}
}
};
struct Cmp {
bool operator()(const hobject_t &oid, const object_extent_set &rhs) const {
return oid < rhs.oid;
}
bool operator()(const object_extent_set &lhs, const hobject_t &oid) const {
return lhs.oid < oid;
}
};
object_extent_set &get_or_create(const hobject_t &oid);
object_extent_set *get_if_exists(const hobject_t &oid);
void remove_and_destroy_if_empty(object_extent_set &set);
using cache_set = boost::intrusive::set<object_extent_set>;
cache_set per_object_caches;
uint64_t next_write_tid = 1;
uint64_t next_read_tid = 1;
struct pin_state {
uint64_t tid = 0;
enum pin_type_t {
NONE,
WRITE,
};
pin_type_t pin_type = NONE;
bool is_write() const { return pin_type == WRITE; }
pin_state(const pin_state &other) = delete;
pin_state &operator=(const pin_state &other) = delete;
pin_state(pin_state &&other) = delete;
pin_state() = default;
using list_member_options = boost::intrusive::member_hook<
extent,
boost::intrusive::list_member_hook<>,
&extent::pin_list_member>;
using list = boost::intrusive::list<extent, list_member_options>;
list pin_list;
~pin_state() {
ceph_assert(pin_list.empty());
ceph_assert(tid == 0);
ceph_assert(pin_type == NONE);
}
void _open(uint64_t in_tid, pin_type_t in_type) {
ceph_assert(pin_type == NONE);
ceph_assert(in_tid > 0);
tid = in_tid;
pin_type = in_type;
}
};
void release_pin(pin_state &p) {
for (auto iter = p.pin_list.begin(); iter != p.pin_list.end(); ) {
std::unique_ptr<extent> extent(&*iter); // we now own this
iter++; // unlink will invalidate
ceph_assert(extent->parent_extent_set);
auto &eset = *(extent->parent_extent_set);
extent->unlink();
remove_and_destroy_if_empty(eset);
}
p.tid = 0;
p.pin_type = pin_state::NONE;
}
public:
class write_pin : private pin_state {
friend class ExtentCache;
private:
void open(uint64_t in_tid) {
_open(in_tid, pin_state::WRITE);
}
public:
write_pin() : pin_state() {}
};
void open_write_pin(write_pin &pin) {
pin.open(next_write_tid++);
}
/**
   * Reserves extents required for rmw, and learns
* which need to be read
*
* Pins all extents in to_write. Returns subset of to_read not
* currently present in the cache. Caller must obtain those
* extents before calling get_remaining_extents_for_rmw.
*
* Transition table:
* - Empty -> Write Pending pin.reqid
* - Write Pending N -> Write Pending pin.reqid
* - Write Pinned N -> Write Pinned pin.reqid
*
* @param oid [in] object undergoing rmw
* @param pin [in,out] pin to use (obtained from create_write_pin)
* @param to_write [in] extents which will be written
* @param to_read [in] extents to read prior to write (must be subset
* of to_write)
* @return subset of to_read which isn't already present or pending
*/
extent_set reserve_extents_for_rmw(
const hobject_t &oid,
write_pin &pin,
const extent_set &to_write,
const extent_set &to_read);
/**
* Gets extents required for rmw not returned from
* reserve_extents_for_rmw
*
* Requested extents (to_get) must be the set to_read \ the set
* returned from reserve_extents_for_rmw. No transition table,
* all extents at this point must be present and already pinned
* for this pin by reserve_extents_for_rmw.
*
* @param oid [in] object
* @param pin [in,out] pin associated with this IO
* @param to_get [in] extents to get (see above for restrictions)
* @return map of buffers from to_get
*/
extent_map get_remaining_extents_for_rmw(
const hobject_t &oid,
write_pin &pin,
const extent_set &to_get);
/**
* Updates the cache to reflect the rmw write
*
* All presented extents must already have been specified in
* reserve_extents_for_rmw under to_write.
*
* Transition table:
* - Empty -> invalid, must call reserve_extents_for_rmw first
* - Write Pending N -> Write Pinned N, update buffer
* (assert N >= pin.reqid)
* - Write Pinned N -> Update buffer (assert N >= pin.reqid)
*
* @param oid [in] object
* @param pin [in,out] pin associated with this IO
* @param extents [in] map of buffers to update
* @return void
*/
void present_rmw_update(
const hobject_t &oid,
write_pin &pin,
const extent_map &extents);
/**
* Release all buffers pinned by pin
*/
void release_write_pin(
write_pin &pin) {
release_pin(pin);
}
std::ostream &print(std::ostream &out) const;
};
std::ostream &operator <<(std::ostream &lhs, const ExtentCache &cache);
#endif
| 14,556 | 28.708163 | 89 | h |
null | ceph-main/src/osd/HitSet.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_HITSET_H
#define CEPH_OSD_HITSET_H
#include <string_view>
#include <boost/scoped_ptr.hpp>
#include "include/encoding.h"
#include "include/unordered_set.h"
#include "common/bloom_filter.hpp"
#include "common/hobject.h"
/**
* generic container for a HitSet
*
* Encapsulate a HitSetImpl of any type. Expose a generic interface
* to users and wrap the encoded object with a type so that it can be
* safely decoded later.
*/
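// Illustrative usage (a sketch; 'oid' stands for some hobject_t and is not
// part of the documented interface):
//
//   HitSet::Params params(new BloomHitSet::Params(0.01, 1000, 42));
//   HitSet hs(params);
//   hs.insert(oid);
//   bool seen = hs.contains(oid);
//   hs.seal();   // finalize (e.g. compress a bloom filter) before encoding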
class HitSet {
public:
typedef enum {
TYPE_NONE = 0,
TYPE_EXPLICIT_HASH = 1,
TYPE_EXPLICIT_OBJECT = 2,
TYPE_BLOOM = 3
} impl_type_t;
static std::string_view get_type_name(impl_type_t t) {
switch (t) {
case TYPE_NONE: return "none";
case TYPE_EXPLICIT_HASH: return "explicit_hash";
case TYPE_EXPLICIT_OBJECT: return "explicit_object";
case TYPE_BLOOM: return "bloom";
default: return "???";
}
}
std::string_view get_type_name() const {
if (impl)
return get_type_name(impl->get_type());
return get_type_name(TYPE_NONE);
}
/// abstract interface for a HitSet implementation
class Impl {
public:
virtual impl_type_t get_type() const = 0;
virtual bool is_full() const = 0;
virtual void insert(const hobject_t& o) = 0;
virtual bool contains(const hobject_t& o) const = 0;
virtual unsigned insert_count() const = 0;
virtual unsigned approx_unique_insert_count() const = 0;
virtual void encode(ceph::buffer::list &bl) const = 0;
virtual void decode(ceph::buffer::list::const_iterator& p) = 0;
virtual void dump(ceph::Formatter *f) const = 0;
virtual Impl* clone() const = 0;
virtual void seal() {}
virtual ~Impl() {}
};
boost::scoped_ptr<Impl> impl;
bool sealed;
class Params {
/// create an Impl* of the given type
bool create_impl(impl_type_t t);
public:
class Impl {
public:
virtual impl_type_t get_type() const = 0;
virtual HitSet::Impl *get_new_impl() const = 0;
virtual void encode(ceph::buffer::list &bl) const {}
virtual void decode(ceph::buffer::list::const_iterator& p) {}
virtual void dump(ceph::Formatter *f) const {}
virtual void dump_stream(std::ostream& o) const {}
virtual ~Impl() {}
};
Params() {}
explicit Params(Impl *i) : impl(i) {}
virtual ~Params() {}
boost::scoped_ptr<Params::Impl> impl;
impl_type_t get_type() const {
if (impl)
return impl->get_type();
return TYPE_NONE;
}
Params(const Params& o) noexcept;
const Params& operator=(const Params& o);
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<HitSet::Params*>& o);
friend std::ostream& operator<<(std::ostream& out, const HitSet::Params& p);
};
HitSet() : impl(NULL), sealed(false) {}
explicit HitSet(Impl *i) : impl(i), sealed(false) {}
explicit HitSet(const HitSet::Params& params);
HitSet(const HitSet& o) {
sealed = o.sealed;
if (o.impl)
impl.reset(o.impl->clone());
else
impl.reset(NULL);
}
const HitSet& operator=(const HitSet& o) {
sealed = o.sealed;
if (o.impl)
impl.reset(o.impl->clone());
else
impl.reset(NULL);
return *this;
}
bool is_full() const {
return impl->is_full();
}
  /// insert an object into the set
void insert(const hobject_t& o) {
impl->insert(o);
}
  /// query whether an object is in the set
bool contains(const hobject_t& o) const {
return impl->contains(o);
}
unsigned insert_count() const {
return impl->insert_count();
}
unsigned approx_unique_insert_count() const {
return impl->approx_unique_insert_count();
}
void seal() {
ceph_assert(!sealed);
sealed = true;
impl->seal();
}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<HitSet*>& o);
private:
void reset_to_type(impl_type_t type);
};
WRITE_CLASS_ENCODER(HitSet)
WRITE_CLASS_ENCODER(HitSet::Params)
typedef boost::shared_ptr<HitSet> HitSetRef;
std::ostream& operator<<(std::ostream& out, const HitSet::Params& p);
/**
* explicitly enumerate hash hits in the set
*/
class ExplicitHashHitSet : public HitSet::Impl {
uint64_t count;
ceph::unordered_set<uint32_t> hits;
public:
class Params : public HitSet::Params::Impl {
public:
HitSet::impl_type_t get_type() const override {
return HitSet::TYPE_EXPLICIT_HASH;
}
HitSet::Impl *get_new_impl() const override {
return new ExplicitHashHitSet;
}
static void generate_test_instances(std::list<Params*>& o) {
o.push_back(new Params);
}
};
ExplicitHashHitSet() : count(0) {}
explicit ExplicitHashHitSet(const ExplicitHashHitSet::Params *p) : count(0) {}
ExplicitHashHitSet(const ExplicitHashHitSet &o) : count(o.count),
hits(o.hits) {}
HitSet::Impl *clone() const override {
return new ExplicitHashHitSet(*this);
}
HitSet::impl_type_t get_type() const override {
return HitSet::TYPE_EXPLICIT_HASH;
}
bool is_full() const override {
return false;
}
void insert(const hobject_t& o) override {
hits.insert(o.get_hash());
++count;
}
bool contains(const hobject_t& o) const override {
return hits.count(o.get_hash());
}
unsigned insert_count() const override {
return count;
}
unsigned approx_unique_insert_count() const override {
return hits.size();
}
void encode(ceph::buffer::list &bl) const override {
ENCODE_START(1, 1, bl);
encode(count, bl);
encode(hits, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl) override {
DECODE_START(1, bl);
decode(count, bl);
decode(hits, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const override;
static void generate_test_instances(std::list<ExplicitHashHitSet*>& o) {
o.push_back(new ExplicitHashHitSet);
o.push_back(new ExplicitHashHitSet);
o.back()->insert(hobject_t());
o.back()->insert(hobject_t("asdf", "", CEPH_NOSNAP, 123, 1, ""));
o.back()->insert(hobject_t("qwer", "", CEPH_NOSNAP, 456, 1, ""));
}
};
WRITE_CLASS_ENCODER(ExplicitHashHitSet)
/**
* explicitly enumerate objects in the set
*/
class ExplicitObjectHitSet : public HitSet::Impl {
uint64_t count;
ceph::unordered_set<hobject_t> hits;
public:
class Params : public HitSet::Params::Impl {
public:
HitSet::impl_type_t get_type() const override {
return HitSet::TYPE_EXPLICIT_OBJECT;
}
HitSet::Impl *get_new_impl() const override {
return new ExplicitObjectHitSet;
}
static void generate_test_instances(std::list<Params*>& o) {
o.push_back(new Params);
}
};
ExplicitObjectHitSet() : count(0) {}
explicit ExplicitObjectHitSet(const ExplicitObjectHitSet::Params *p) : count(0) {}
ExplicitObjectHitSet(const ExplicitObjectHitSet &o) : count(o.count),
hits(o.hits) {}
HitSet::Impl *clone() const override {
return new ExplicitObjectHitSet(*this);
}
HitSet::impl_type_t get_type() const override {
return HitSet::TYPE_EXPLICIT_OBJECT;
}
bool is_full() const override {
return false;
}
void insert(const hobject_t& o) override {
hits.insert(o);
++count;
}
bool contains(const hobject_t& o) const override {
return hits.count(o);
}
unsigned insert_count() const override {
return count;
}
unsigned approx_unique_insert_count() const override {
return hits.size();
}
void encode(ceph::buffer::list &bl) const override {
ENCODE_START(1, 1, bl);
encode(count, bl);
encode(hits, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) override {
DECODE_START(1, bl);
decode(count, bl);
decode(hits, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const override;
static void generate_test_instances(std::list<ExplicitObjectHitSet*>& o) {
o.push_back(new ExplicitObjectHitSet);
o.push_back(new ExplicitObjectHitSet);
o.back()->insert(hobject_t());
o.back()->insert(hobject_t("asdf", "", CEPH_NOSNAP, 123, 1, ""));
o.back()->insert(hobject_t("qwer", "", CEPH_NOSNAP, 456, 1, ""));
}
};
WRITE_CLASS_ENCODER(ExplicitObjectHitSet)
/**
* use a bloom_filter to track hits to the set
*/
class BloomHitSet : public HitSet::Impl {
compressible_bloom_filter bloom;
public:
HitSet::impl_type_t get_type() const override {
return HitSet::TYPE_BLOOM;
}
class Params : public HitSet::Params::Impl {
public:
HitSet::impl_type_t get_type() const override {
return HitSet::TYPE_BLOOM;
}
HitSet::Impl *get_new_impl() const override {
return new BloomHitSet;
}
uint32_t fpp_micro; ///< false positive probability / 1M
uint64_t target_size; ///< number of unique insertions we expect to this HitSet
uint64_t seed; ///< seed to use when initializing the bloom filter
Params()
: fpp_micro(0), target_size(0), seed(0) {}
Params(double fpp, uint64_t t, uint64_t s)
: fpp_micro(fpp * 1000000.0), target_size(t), seed(s) {}
Params(const Params &o)
: fpp_micro(o.fpp_micro),
target_size(o.target_size),
seed(o.seed) {}
~Params() override {}
double get_fpp() const {
return (double)fpp_micro / 1000000.0;
}
void set_fpp(double f) {
fpp_micro = (unsigned)(llrintl(f * 1000000.0));
}
void encode(ceph::buffer::list& bl) const override {
ENCODE_START(1, 1, bl);
encode(fpp_micro, bl);
encode(target_size, bl);
encode(seed, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) override {
DECODE_START(1, bl);
decode(fpp_micro, bl);
decode(target_size, bl);
decode(seed, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const override;
void dump_stream(std::ostream& o) const override {
o << "false_positive_probability: "
<< get_fpp() << ", target_size: " << target_size
<< ", seed: " << seed;
}
static void generate_test_instances(std::list<Params*>& o) {
o.push_back(new Params);
o.push_back(new Params);
(*o.rbegin())->fpp_micro = 123456;
(*o.rbegin())->target_size = 300;
(*o.rbegin())->seed = 99;
}
};
BloomHitSet() {}
BloomHitSet(unsigned inserts, double fpp, int seed)
: bloom(inserts, fpp, seed)
{}
explicit BloomHitSet(const BloomHitSet::Params *p) : bloom(p->target_size,
p->get_fpp(),
p->seed)
{}
BloomHitSet(const BloomHitSet &o) {
// oh god
ceph::buffer::list bl;
o.encode(bl);
auto bli = std::cbegin(bl);
this->decode(bli);
}
HitSet::Impl *clone() const override {
return new BloomHitSet(*this);
}
bool is_full() const override {
return bloom.is_full();
}
void insert(const hobject_t& o) override {
bloom.insert(o.get_hash());
}
bool contains(const hobject_t& o) const override {
return bloom.contains(o.get_hash());
}
unsigned insert_count() const override {
return bloom.element_count();
}
unsigned approx_unique_insert_count() const override {
return bloom.approx_unique_element_count();
}
void seal() override {
    // aim for a density of .5 (50% of bits set)
double pc = bloom.density() * 2.0;
if (pc < 1.0)
bloom.compress(pc);
}
void encode(ceph::buffer::list &bl) const override {
ENCODE_START(1, 1, bl);
encode(bloom, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) override {
DECODE_START(1, bl);
decode(bloom, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const override;
static void generate_test_instances(std::list<BloomHitSet*>& o) {
o.push_back(new BloomHitSet);
o.push_back(new BloomHitSet(10, .1, 1));
o.back()->insert(hobject_t());
o.back()->insert(hobject_t("asdf", "", CEPH_NOSNAP, 123, 1, ""));
o.back()->insert(hobject_t("qwer", "", CEPH_NOSNAP, 456, 1, ""));
}
};
WRITE_CLASS_ENCODER(BloomHitSet)
#endif
| 12,721 | 26.899123 | 84 | h |
null | ceph-main/src/osd/MissingLoc.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <set>
#include "OSDMap.h"
#include "common/HBHandle.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "osd_types.h"
class MissingLoc {
public:
class MappingInfo {
public:
virtual const std::set<pg_shard_t> &get_upset() const = 0;
virtual bool is_ec_pg() const = 0;
virtual int get_pg_size() const = 0;
virtual ~MappingInfo() {}
};
// a loc_count indicates how many locations we know in each of
// these distinct sets
struct loc_count_t {
int up = 0; //< up
int other = 0; //< other
friend bool operator<(const loc_count_t& l,
const loc_count_t& r) {
return (l.up < r.up ||
(l.up == r.up &&
(l.other < r.other)));
}
friend std::ostream& operator<<(std::ostream& out, const loc_count_t& l) {
ceph_assert(l.up >= 0);
ceph_assert(l.other >= 0);
return out << "(" << l.up << "+" << l.other << ")";
}
};
using missing_by_count_t = std::map<shard_id_t, std::map<loc_count_t,int>>;
private:
loc_count_t _get_count(const std::set<pg_shard_t> &shards) {
loc_count_t r;
for (auto s : shards) {
if (mapping_info->get_upset().count(s)) {
r.up++;
} else {
r.other++;
}
}
return r;
}
std::map<hobject_t, pg_missing_item> needs_recovery_map;
std::map<hobject_t, std::set<pg_shard_t> > missing_loc;
std::set<pg_shard_t> missing_loc_sources;
// for every entry in missing_loc, we count how many of each type of shard we have,
// and maintain totals here. The sum of the values for this std::map will always equal
// missing_loc.size().
missing_by_count_t missing_by_count;
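  // Worked example (illustrative, not from the original comment): in a
  // replicated pool, if missing_loc holds three objects and each is known
  // to be on two up shards plus one other shard, then
  // missing_by_count[shard_id_t::NO_SHARD][loc_count_t{2, 1}] == 3.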
void pgs_by_shard_id(
const std::set<pg_shard_t>& s,
std::map<shard_id_t, std::set<pg_shard_t> >& pgsbs) {
if (mapping_info->is_ec_pg()) {
int num_shards = mapping_info->get_pg_size();
// For completely missing shards initialize with empty std::set<pg_shard_t>
for (int i = 0 ; i < num_shards ; ++i) {
shard_id_t shard(i);
pgsbs[shard];
}
for (auto pgs: s)
pgsbs[pgs.shard].insert(pgs);
} else {
pgsbs[shard_id_t::NO_SHARD] = s;
}
}
void _inc_count(const std::set<pg_shard_t>& s) {
std::map< shard_id_t, std::set<pg_shard_t> > pgsbs;
pgs_by_shard_id(s, pgsbs);
for (auto shard: pgsbs)
++missing_by_count[shard.first][_get_count(shard.second)];
}
void _dec_count(const std::set<pg_shard_t>& s) {
std::map< shard_id_t, std::set<pg_shard_t> > pgsbs;
pgs_by_shard_id(s, pgsbs);
for (auto shard: pgsbs) {
auto p = missing_by_count[shard.first].find(_get_count(shard.second));
ceph_assert(p != missing_by_count[shard.first].end());
if (--p->second == 0) {
missing_by_count[shard.first].erase(p);
}
}
}
spg_t pgid;
MappingInfo *mapping_info;
DoutPrefixProvider *dpp;
CephContext *cct;
std::set<pg_shard_t> empty_set;
public:
boost::scoped_ptr<IsPGReadablePredicate> is_readable;
boost::scoped_ptr<IsPGRecoverablePredicate> is_recoverable;
explicit MissingLoc(
spg_t pgid,
MappingInfo *mapping_info,
DoutPrefixProvider *dpp,
CephContext *cct)
: pgid(pgid), mapping_info(mapping_info), dpp(dpp), cct(cct) { }
void set_backend_predicates(
IsPGReadablePredicate *_is_readable,
IsPGRecoverablePredicate *_is_recoverable) {
is_readable.reset(_is_readable);
is_recoverable.reset(_is_recoverable);
}
const IsPGRecoverablePredicate &get_recoverable_predicate() const {
return *is_recoverable;
}
std::ostream& gen_prefix(std::ostream& out) const {
return dpp->gen_prefix(out);
}
bool needs_recovery(
const hobject_t &hoid,
eversion_t *v = 0) const {
std::map<hobject_t, pg_missing_item>::const_iterator i =
needs_recovery_map.find(hoid);
if (i == needs_recovery_map.end())
return false;
if (v)
*v = i->second.need;
return true;
}
bool is_deleted(const hobject_t &hoid) const {
auto i = needs_recovery_map.find(hoid);
if (i == needs_recovery_map.end())
return false;
return i->second.is_delete();
}
bool is_unfound(const hobject_t &hoid) const {
auto it = needs_recovery_map.find(hoid);
if (it == needs_recovery_map.end()) {
return false;
}
if (it->second.is_delete()) {
return false;
}
auto mit = missing_loc.find(hoid);
return mit == missing_loc.end() || !(*is_recoverable)(mit->second);
}
bool readable_with_acting(
const hobject_t &hoid,
const std::set<pg_shard_t> &acting,
eversion_t* v = 0) const;
uint64_t num_unfound() const {
uint64_t ret = 0;
for (std::map<hobject_t, pg_missing_item>::const_iterator i =
needs_recovery_map.begin();
i != needs_recovery_map.end();
++i) {
if (i->second.is_delete())
continue;
auto mi = missing_loc.find(i->first);
if (mi == missing_loc.end() || !(*is_recoverable)(mi->second))
++ret;
}
return ret;
}
bool have_unfound() const {
for (std::map<hobject_t, pg_missing_item>::const_iterator i =
needs_recovery_map.begin();
i != needs_recovery_map.end();
++i) {
if (i->second.is_delete())
continue;
auto mi = missing_loc.find(i->first);
if (mi == missing_loc.end() || !(*is_recoverable)(mi->second))
return true;
}
return false;
}
void clear() {
needs_recovery_map.clear();
missing_loc.clear();
missing_loc_sources.clear();
missing_by_count.clear();
}
void add_location(const hobject_t &hoid, pg_shard_t location) {
auto p = missing_loc.find(hoid);
if (p == missing_loc.end()) {
p = missing_loc.emplace(hoid, std::set<pg_shard_t>()).first;
} else {
_dec_count(p->second);
}
p->second.insert(location);
_inc_count(p->second);
}
void remove_location(const hobject_t &hoid, pg_shard_t location) {
auto p = missing_loc.find(hoid);
if (p != missing_loc.end()) {
_dec_count(p->second);
p->second.erase(location);
if (p->second.empty()) {
missing_loc.erase(p);
} else {
_inc_count(p->second);
}
}
}
void clear_location(const hobject_t &hoid) {
auto p = missing_loc.find(hoid);
if (p != missing_loc.end()) {
_dec_count(p->second);
missing_loc.erase(p);
}
}
void add_active_missing(const pg_missing_t &missing) {
for (std::map<hobject_t, pg_missing_item>::const_iterator i =
missing.get_items().begin();
i != missing.get_items().end();
++i) {
std::map<hobject_t, pg_missing_item>::const_iterator j =
needs_recovery_map.find(i->first);
if (j == needs_recovery_map.end()) {
needs_recovery_map.insert(*i);
} else {
if (i->second.need != j->second.need) {
lgeneric_dout(cct, 0) << this << " " << pgid << " unexpected need for "
<< i->first << " have " << j->second
<< " tried to add " << i->second << dendl;
ceph_assert(0 == "unexpected need for missing item");
}
}
}
}
void add_missing(const hobject_t &hoid, eversion_t need, eversion_t have, bool is_delete=false) {
needs_recovery_map[hoid] = pg_missing_item(need, have, is_delete);
}
void revise_need(const hobject_t &hoid, eversion_t need) {
auto it = needs_recovery_map.find(hoid);
ceph_assert(it != needs_recovery_map.end());
it->second.need = need;
}
/// Adds info about a possible recovery source
bool add_source_info(
pg_shard_t source, ///< [in] source
const pg_info_t &oinfo, ///< [in] info
const pg_missing_t &omissing, ///< [in] (optional) missing
HBHandle *handle ///< [in] ThreadPool handle
); ///< @return whether a new object location was discovered
/// Adds recovery sources in batch
void add_batch_sources_info(
    const std::set<pg_shard_t> &sources,  ///< [in] a set of sources which can be used for all objects
HBHandle *handle ///< [in] ThreadPool handle
);
/// Uses osdmap to update structures for now down sources
void check_recovery_sources(const OSDMapRef& osdmap);
/// Remove stray from recovery sources
void remove_stray_recovery_sources(pg_shard_t stray);
  /// Call when hoid is no longer missing in the acting set
void recovered(const hobject_t &hoid) {
needs_recovery_map.erase(hoid);
auto p = missing_loc.find(hoid);
if (p != missing_loc.end()) {
_dec_count(p->second);
missing_loc.erase(p);
}
}
/// Call to update structures for hoid after a change
void rebuild(
const hobject_t &hoid,
pg_shard_t self,
const std::set<pg_shard_t> &to_recover,
const pg_info_t &info,
const pg_missing_t &missing,
const std::map<pg_shard_t, pg_missing_t> &pmissing,
const std::map<pg_shard_t, pg_info_t> &pinfo) {
recovered(hoid);
std::optional<pg_missing_item> item;
auto miter = missing.get_items().find(hoid);
if (miter != missing.get_items().end()) {
item = miter->second;
} else {
for (auto &&i: to_recover) {
if (i == self)
continue;
auto pmiter = pmissing.find(i);
ceph_assert(pmiter != pmissing.end());
miter = pmiter->second.get_items().find(hoid);
if (miter != pmiter->second.get_items().end()) {
item = miter->second;
break;
}
}
}
if (!item)
return; // recovered!
needs_recovery_map[hoid] = *item;
if (item->is_delete())
return;
auto mliter =
missing_loc.emplace(hoid, std::set<pg_shard_t>()).first;
ceph_assert(info.last_backfill.is_max());
ceph_assert(info.last_update >= item->need);
if (!missing.is_missing(hoid))
mliter->second.insert(self);
for (auto &&i: pmissing) {
if (i.first == self)
continue;
auto pinfoiter = pinfo.find(i.first);
ceph_assert(pinfoiter != pinfo.end());
if (item->need <= pinfoiter->second.last_update &&
hoid <= pinfoiter->second.last_backfill &&
!i.second.is_missing(hoid))
mliter->second.insert(i.first);
}
_inc_count(mliter->second);
}
const std::set<pg_shard_t> &get_locations(const hobject_t &hoid) const {
auto it = missing_loc.find(hoid);
return it == missing_loc.end() ? empty_set : it->second;
}
const std::map<hobject_t, std::set<pg_shard_t>> &get_missing_locs() const {
return missing_loc;
}
const std::map<hobject_t, pg_missing_item> &get_needs_recovery() const {
return needs_recovery_map;
}
const missing_by_count_t &get_missing_by_count() const {
return missing_by_count;
}
};
| 10,587 | 28.909605 | 109 | h |
null | ceph-main/src/osd/OSDCap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
* OSDCaps: Hold the capabilities associated with a single authenticated
* user key. These are specified by text strings of the form
* "allow r" (which allows reading anything on the OSD)
* "allow rwx pool foo" (which allows full access to listed pools)
* "allow *" (which allows full access to EVERYTHING)
*
* The full grammar is documented in the parser in OSDCap.cc.
*
* The OSD assumes that anyone with * caps is an admin and has full
* message permissions. This means that only the monitor and the OSDs
* should get *
*/
#ifndef CEPH_OSDCAP_H
#define CEPH_OSDCAP_H
#include <ostream>
using std::ostream;
#include "include/types.h"
#include "OpRequest.h"
#include <list>
#include <vector>
#include <boost/optional.hpp>
#include <boost/fusion/include/adapt_struct.hpp>
static const __u8 OSD_CAP_R = (1 << 1); // read
static const __u8 OSD_CAP_W = (1 << 2); // write
static const __u8 OSD_CAP_CLS_R = (1 << 3); // class read
static const __u8 OSD_CAP_CLS_W = (1 << 4); // class write
static const __u8 OSD_CAP_X = (OSD_CAP_CLS_R | OSD_CAP_CLS_W); // execute
static const __u8 OSD_CAP_ANY = 0xff; // *
struct osd_rwxa_t {
__u8 val;
// cppcheck-suppress noExplicitConstructor
osd_rwxa_t(__u8 v = 0) : val(v) {}
osd_rwxa_t& operator=(__u8 v) {
val = v;
return *this;
}
operator __u8() const {
return val;
}
};
ostream& operator<<(ostream& out, const osd_rwxa_t& p);
struct OSDCapSpec {
osd_rwxa_t allow;
std::string class_name;
std::string method_name;
OSDCapSpec() : allow(0) {}
explicit OSDCapSpec(osd_rwxa_t v) : allow(v) {}
OSDCapSpec(std::string class_name, std::string method_name)
: allow(0), class_name(std::move(class_name)),
method_name(std::move(method_name)) {}
bool allow_all() const {
return allow == OSD_CAP_ANY;
}
};
ostream& operator<<(ostream& out, const OSDCapSpec& s);
struct OSDCapPoolNamespace {
std::string pool_name;
boost::optional<std::string> nspace = boost::none;
OSDCapPoolNamespace() {
}
OSDCapPoolNamespace(const std::string& pool_name,
const boost::optional<std::string>& nspace = boost::none)
: pool_name(pool_name), nspace(nspace) {
}
bool is_match(const std::string& pn, const std::string& ns) const;
bool is_match_all() const;
};
ostream& operator<<(ostream& out, const OSDCapPoolNamespace& pns);
struct OSDCapPoolTag {
typedef std::map<std::string, std::map<std::string, std::string> > app_map_t;
std::string application;
std::string key;
std::string value;
OSDCapPoolTag () {}
OSDCapPoolTag(const std::string& application, const std::string& key,
const std::string& value) :
application(application), key(key), value(value) {}
bool is_match(const app_map_t& app_map) const;
bool is_match_all() const;
};
// adapt for parsing with boost::spirit::qi in OSDCapParser
BOOST_FUSION_ADAPT_STRUCT(OSDCapPoolTag,
(std::string, application)
(std::string, key)
(std::string, value))
ostream& operator<<(ostream& out, const OSDCapPoolTag& pt);
struct OSDCapMatch {
typedef std::map<std::string, std::map<std::string, std::string> > app_map_t;
OSDCapPoolNamespace pool_namespace;
OSDCapPoolTag pool_tag;
std::string object_prefix;
OSDCapMatch() {}
explicit OSDCapMatch(const OSDCapPoolTag& pt) : pool_tag(pt) {}
explicit OSDCapMatch(const OSDCapPoolNamespace& pns) : pool_namespace(pns) {}
OSDCapMatch(const OSDCapPoolNamespace& pns, const std::string& pre)
: pool_namespace(pns), object_prefix(pre) {}
OSDCapMatch(const std::string& pl, const std::string& pre)
: pool_namespace(pl), object_prefix(pre) {}
OSDCapMatch(const std::string& pl, const std::string& ns,
const std::string& pre)
: pool_namespace(pl, ns), object_prefix(pre) {}
OSDCapMatch(const std::string& dummy, const std::string& app,
const std::string& key, const std::string& val)
: pool_tag(app, key, val) {}
OSDCapMatch(const std::string& ns, const OSDCapPoolTag& pt)
: pool_namespace("", ns), pool_tag(pt) {}
/**
* check if given request parameters match our constraints
*
* @param pool_name pool name
* @param nspace_name namespace name
* @param object object name
* @return true if we match, false otherwise
*/
bool is_match(const std::string& pool_name, const std::string& nspace_name,
const app_map_t& app_map,
const std::string& object) const;
bool is_match_all() const;
};
ostream& operator<<(ostream& out, const OSDCapMatch& m);
struct OSDCapProfile {
std::string name;
OSDCapPoolNamespace pool_namespace;
OSDCapProfile() {
}
OSDCapProfile(const std::string& name,
const std::string& pool_name,
const boost::optional<std::string>& nspace = boost::none)
: name(name), pool_namespace(pool_name, nspace) {
}
inline bool is_valid() const {
return !name.empty();
}
};
ostream& operator<<(ostream& out, const OSDCapProfile& m);
struct OSDCapGrant {
OSDCapMatch match;
OSDCapSpec spec;
OSDCapProfile profile;
std::string network;
entity_addr_t network_parsed;
unsigned network_prefix = 0;
bool network_valid = true;
// explicit grants that a profile grant expands to; populated as
// needed by expand_profile() and cached here.
std::list<OSDCapGrant> profile_grants;
OSDCapGrant() {}
OSDCapGrant(const OSDCapMatch& m, const OSDCapSpec& s,
boost::optional<std::string> n = {})
: match(m), spec(s) {
if (n) {
set_network(*n);
}
}
explicit OSDCapGrant(const OSDCapProfile& profile,
boost::optional<std::string> n = {})
: profile(profile) {
if (n) {
set_network(*n);
}
expand_profile();
}
void set_network(const std::string& n);
bool allow_all() const;
bool is_capable(const std::string& pool_name, const std::string& ns,
const OSDCapPoolTag::app_map_t& application_metadata,
const std::string& object, bool op_may_read, bool op_may_write,
const std::vector<OpInfo::ClassInfo>& classes,
const entity_addr_t& addr,
std::vector<bool>* class_allowed) const;
void expand_profile();
};
ostream& operator<<(ostream& out, const OSDCapGrant& g);
struct OSDCap {
std::vector<OSDCapGrant> grants;
OSDCap() {}
explicit OSDCap(std::vector<OSDCapGrant> g) : grants(std::move(g)) {}
bool allow_all() const;
void set_allow_all();
bool parse(const std::string& str, ostream *err=NULL);
/**
* check if we are capable of something
*
* This method actually checks a description of a particular operation against
* what the capability has specified. Currently that is just rwx with matches
   * against pool and object name prefix.
*
* @param pool_name name of the pool we are accessing
* @param ns name of the namespace we are accessing
* @param object name of the object we are accessing
* @param op_may_read whether the operation may need to read
* @param op_may_write whether the operation may need to write
* @param classes (class-name, rd, wr, allowed-flag) tuples
* @return true if the operation is allowed, false otherwise
*/
bool is_capable(const std::string& pool_name, const std::string& ns,
const OSDCapPoolTag::app_map_t& application_metadata,
const std::string& object, bool op_may_read, bool op_may_write,
const std::vector<OpInfo::ClassInfo>& classes,
const entity_addr_t& addr) const;
};
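// Illustrative usage sketch (added for clarity, not part of the original
// header).  The cap text below follows the grammar examples at the top of
// this file; `addr` stands for an entity_addr_t the caller already has:
//
//   OSDCap cap;
//   if (!cap.parse("allow rwx pool foo", &std::cerr)) {
//     // malformed capability
//   }
//   bool ok = cap.is_capable("foo", "", {}, "someobject",
//                            true /*may_read*/, true /*may_write*/,
//                            {}, addr);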
inline std::ostream& operator<<(std::ostream& out, const OSDCap& cap)
{
return out << "osdcap" << cap.grants;
}
#endif
| 8,045 | 29.709924 | 81 | h |
null | ceph-main/src/osd/OSDMapMapping.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OSDMAPMAPPING_H
#define CEPH_OSDMAPMAPPING_H
#include <vector>
#include <map>
#include "osd/osd_types.h"
#include "common/WorkQueue.h"
#include "common/Cond.h"
class OSDMap;
/// work queue to perform work on batches of pgids on multiple CPUs
class ParallelPGMapper {
public:
struct Job {
utime_t start, finish;
unsigned shards = 0;
const OSDMap *osdmap;
bool aborted = false;
Context *onfinish = nullptr;
ceph::mutex lock = ceph::make_mutex("ParallelPGMapper::Job::lock");
ceph::condition_variable cond;
Job(const OSDMap *om) : start(ceph_clock_now()), osdmap(om) {}
virtual ~Job() {
ceph_assert(shards == 0);
}
    // child must implement both forms of process (either may be a no-op)
virtual void process(const std::vector<pg_t>& pgs) = 0;
virtual void process(int64_t poolid, unsigned ps_begin, unsigned ps_end) = 0;
virtual void complete() = 0;
void set_finish_event(Context *fin) {
lock.lock();
if (shards == 0) {
// already done.
lock.unlock();
fin->complete(0);
} else {
// set finisher
onfinish = fin;
lock.unlock();
}
}
bool is_done() {
std::lock_guard l(lock);
return shards == 0;
}
utime_t get_duration() {
return finish - start;
}
void wait() {
std::unique_lock l(lock);
cond.wait(l, [this] { return shards == 0; });
}
bool wait_for(double duration) {
utime_t until = start;
until += duration;
std::unique_lock l(lock);
while (shards > 0) {
if (ceph_clock_now() >= until) {
return false;
}
cond.wait(l);
}
return true;
}
void abort() {
Context *fin = nullptr;
{
std::unique_lock l(lock);
aborted = true;
fin = onfinish;
onfinish = nullptr;
cond.wait(l, [this] { return shards == 0; });
}
if (fin) {
fin->complete(-ECANCELED);
}
}
void start_one() {
std::lock_guard l(lock);
++shards;
}
void finish_one();
};
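  // Illustrative sketch (added for clarity, not part of the original header):
  // a minimal Job subclass.  Both process() overloads are pure virtual, so a
  // child provides both even if only one does real work (compare
  // OSDMapMapping::MappingJob below).  `osdmap`, `mapper` and `input_pgs`
  // stand for values the caller already has.
  //
  //   struct CountingJob : ParallelPGMapper::Job {
  //     std::atomic<unsigned> pgs_seen{0};
  //     explicit CountingJob(const OSDMap *m) : Job(m) {}
  //     void process(const std::vector<pg_t>& pgs) override {
  //       pgs_seen += pgs.size();
  //     }
  //     void process(int64_t pool, unsigned ps_begin, unsigned ps_end) override {
  //       pgs_seen += ps_end - ps_begin;
  //     }
  //     void complete() override {}
  //   };
  //
  //   CountingJob job(&osdmap);
  //   mapper.queue(&job, 128 /*pgs_per_item*/, input_pgs);
  //   job.wait();              // or job.set_finish_event(...)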
protected:
CephContext *cct;
struct Item {
Job *job;
int64_t pool;
unsigned begin, end;
std::vector<pg_t> pgs;
Item(Job *j, std::vector<pg_t> pgs) : job(j), pgs(pgs) {}
Item(Job *j, int64_t p, unsigned b, unsigned e)
: job(j),
pool(p),
begin(b),
end(e) {}
};
std::deque<Item*> q;
struct WQ : public ThreadPool::WorkQueue<Item> {
ParallelPGMapper *m;
WQ(ParallelPGMapper *m_, ThreadPool *tp)
: ThreadPool::WorkQueue<Item>(
"ParallelPGMapper::WQ",
ceph::make_timespan(m_->cct->_conf->threadpool_default_timeout),
ceph::timespan::zero(),
tp),
m(m_) {}
bool _enqueue(Item *i) override {
m->q.push_back(i);
return true;
}
void _dequeue(Item *i) override {
ceph_abort();
}
Item *_dequeue() override {
while (!m->q.empty()) {
Item *i = m->q.front();
m->q.pop_front();
if (i->job->aborted) {
i->job->finish_one();
delete i;
} else {
return i;
}
}
return nullptr;
}
void _process(Item *i, ThreadPool::TPHandle &h) override;
void _clear() override {
ceph_assert(_empty());
}
bool _empty() override {
return m->q.empty();
}
} wq;
public:
ParallelPGMapper(CephContext *cct, ThreadPool *tp)
: cct(cct),
wq(this, tp) {}
void queue(
Job *job,
unsigned pgs_per_item,
const std::vector<pg_t>& input_pgs);
void drain() {
wq.drain();
}
};
/// a precalculated mapping of every PG for a given OSDMap
class OSDMapMapping {
public:
MEMPOOL_CLASS_HELPERS();
private:
struct PoolMapping {
MEMPOOL_CLASS_HELPERS();
unsigned size = 0;
unsigned pg_num = 0;
bool erasure = false;
mempool::osdmap_mapping::vector<int32_t> table;
size_t row_size() const {
return
1 + // acting_primary
1 + // up_primary
1 + // num acting
1 + // num up
size + // acting
size; // up
}
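    // Worked example (added for clarity): for a replicated pool with size=3,
    // each row holds 4 + 2*3 = 10 int32_t slots --
    // [acting_primary, up_primary, num_acting, num_up, acting[0..2], up[0..2]].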
PoolMapping(int s, int p, bool e)
: size(s),
pg_num(p),
erasure(e),
table(pg_num * row_size()) {
}
void get(size_t ps,
std::vector<int> *up,
int *up_primary,
std::vector<int> *acting,
int *acting_primary) const {
const int32_t *row = &table[row_size() * ps];
if (acting_primary) {
*acting_primary = row[0];
}
if (up_primary) {
*up_primary = row[1];
}
if (acting) {
acting->resize(row[2]);
for (int i = 0; i < row[2]; ++i) {
(*acting)[i] = row[4 + i];
}
}
if (up) {
up->resize(row[3]);
for (int i = 0; i < row[3]; ++i) {
(*up)[i] = row[4 + size + i];
}
}
}
void set(size_t ps,
const std::vector<int>& up,
int up_primary,
const std::vector<int>& acting,
int acting_primary) {
int32_t *row = &table[row_size() * ps];
row[0] = acting_primary;
row[1] = up_primary;
// these should always be <= the pool size, but just in case, avoid
// blowing out the array. Note that our mapping is not completely
// accurate in this case--this is just to avoid crashing.
row[2] = std::min<int32_t>(acting.size(), size);
row[3] = std::min<int32_t>(up.size(), size);
for (int i = 0; i < row[2]; ++i) {
row[4 + i] = acting[i];
}
for (int i = 0; i < row[3]; ++i) {
row[4 + size + i] = up[i];
}
}
};
mempool::osdmap_mapping::map<int64_t,PoolMapping> pools;
mempool::osdmap_mapping::vector<
mempool::osdmap_mapping::vector<pg_t>> acting_rmap; // osd -> pg
//unused: mempool::osdmap_mapping::vector<std::vector<pg_t>> up_rmap; // osd -> pg
epoch_t epoch = 0;
uint64_t num_pgs = 0;
void _init_mappings(const OSDMap& osdmap);
void _update_range(
const OSDMap& map,
int64_t pool,
unsigned pg_begin, unsigned pg_end);
void _build_rmap(const OSDMap& osdmap);
void _start(const OSDMap& osdmap) {
_init_mappings(osdmap);
}
void _finish(const OSDMap& osdmap);
void _dump();
friend class ParallelPGMapper;
struct MappingJob : public ParallelPGMapper::Job {
OSDMapMapping *mapping;
MappingJob(const OSDMap *osdmap, OSDMapMapping *m)
: Job(osdmap), mapping(m) {
mapping->_start(*osdmap);
}
void process(const std::vector<pg_t>& pgs) override {}
void process(int64_t pool, unsigned ps_begin, unsigned ps_end) override {
mapping->_update_range(*osdmap, pool, ps_begin, ps_end);
}
void complete() override {
mapping->_finish(*osdmap);
}
};
friend class OSDMapTest;
// for testing only
void update(const OSDMap& map);
public:
void get(pg_t pgid,
std::vector<int> *up,
int *up_primary,
std::vector<int> *acting,
int *acting_primary) const {
auto p = pools.find(pgid.pool());
ceph_assert(p != pools.end());
ceph_assert(pgid.ps() < p->second.pg_num);
p->second.get(pgid.ps(), up, up_primary, acting, acting_primary);
}
bool get_primary_and_shard(pg_t pgid,
int *acting_primary,
spg_t *spgid) {
auto p = pools.find(pgid.pool());
ceph_assert(p != pools.end());
ceph_assert(pgid.ps() < p->second.pg_num);
std::vector<int> acting;
p->second.get(pgid.ps(), nullptr, nullptr, &acting, acting_primary);
if (p->second.erasure) {
for (uint8_t i = 0; i < acting.size(); ++i) {
if (acting[i] == *acting_primary) {
*spgid = spg_t(pgid, shard_id_t(i));
return true;
}
}
return false;
} else {
*spgid = spg_t(pgid);
return true;
}
}
const mempool::osdmap_mapping::vector<pg_t>& get_osd_acting_pgs(unsigned osd) {
ceph_assert(osd < acting_rmap.size());
return acting_rmap[osd];
}
void update(const OSDMap& map, pg_t pgid);
std::unique_ptr<MappingJob> start_update(
const OSDMap& map,
ParallelPGMapper& mapper,
unsigned pgs_per_item) {
std::unique_ptr<MappingJob> job(new MappingJob(&map, this));
mapper.queue(job.get(), pgs_per_item, {});
return job;
}
epoch_t get_epoch() const {
return epoch;
}
uint64_t get_num_pgs() const {
return num_pgs;
}
};
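// Illustrative usage sketch (added for clarity, not part of the original
// header); `osdmap` and `mapper` stand for an OSDMap and a ThreadPool-backed
// ParallelPGMapper the caller already has:
//
//   OSDMapMapping mapping;
//   auto job = mapping.start_update(osdmap, mapper, 128 /*pgs_per_item*/);
//   job->wait();
//   std::vector<int> up, acting;
//   int up_primary, acting_primary;
//   mapping.get(pg_t(0, 1), &up, &up_primary, &acting, &acting_primary);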
#endif
| 8,140 | 21.932394 | 85 | h |
null | ceph-main/src/osd/ObjectVersioner.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_OBJECTVERSIONER_H
#define CEPH_OSD_OBJECTVERSIONER_H
class ObjectVersioner {
public:
pobject_t oid;
void get_versions(list<version_t>& ls);
version_t head(); // newest
version_t committed(); // last committed
version_t tail(); // oldest
/*
   * prepare a new version, starting with "raw" transaction t.
*/
void prepare(ObjectStore::Transaction& t, version_t v);
void rollback_to(version_t v);
void commit_to(version_t v);
};
#endif
| 914 | 24.416667 | 71 | h |
null | ceph-main/src/osd/OpRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 New Dream Network/Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef OPREQUEST_H_
#define OPREQUEST_H_
#include "osd/osd_op_util.h"
#include "osd/osd_types.h"
#include "common/TrackedOp.h"
#include "common/tracer.h"
/**
* The OpRequest takes in a Message* and takes over a single reference
* to it, which it puts() when destroyed.
*/
struct OpRequest : public TrackedOp {
friend class OpTracker;
private:
OpInfo op_info;
public:
int maybe_init_op_info(const OSDMap &osdmap);
auto get_flags() const { return op_info.get_flags(); }
bool op_info_needs_init() const { return op_info.get_flags() == 0; }
bool check_rmw(int flag) const { return op_info.check_rmw(flag); }
bool may_read() const { return op_info.may_read(); }
bool may_read_data() const { return op_info.may_read_data(); }
bool may_write() const { return op_info.may_write(); }
bool may_cache() const { return op_info.may_cache(); }
bool rwordered_forced() const { return op_info.rwordered_forced(); }
bool rwordered() const { return op_info.rwordered(); }
bool includes_pg_op() const { return op_info.includes_pg_op(); }
bool need_read_cap() const { return op_info.need_read_cap(); }
bool need_write_cap() const { return op_info.need_write_cap(); }
bool need_promote() const { return op_info.need_promote(); }
bool need_skip_handle_cache() const { return op_info.need_skip_handle_cache(); }
bool need_skip_promote() const { return op_info.need_skip_promote(); }
bool allows_returnvec() const { return op_info.allows_returnvec(); }
std::vector<OpInfo::ClassInfo> classes() const {
return op_info.get_classes();
}
void _dump(ceph::Formatter *f) const override;
bool has_feature(uint64_t f) const {
#ifdef WITH_SEASTAR
ceph_abort("In crimson, conn is independently maintained outside Message");
#else
return request->get_connection()->has_feature(f);
#endif
}
private:
Message *request; /// the logical request we are tracking
osd_reqid_t reqid;
entity_inst_t req_src_inst;
uint8_t hit_flag_points;
uint8_t latest_flag_point;
const char* last_event_detail = nullptr;
utime_t dequeued_time;
  static const uint8_t flag_queued_for_pg = 1 << 0;
static const uint8_t flag_reached_pg = 1 << 1;
static const uint8_t flag_delayed = 1 << 2;
static const uint8_t flag_started = 1 << 3;
static const uint8_t flag_sub_op_sent = 1 << 4;
static const uint8_t flag_commit_sent = 1 << 5;
OpRequest(Message *req, OpTracker *tracker);
protected:
void _dump_op_descriptor_unlocked(std::ostream& stream) const override;
void _unregistered() override;
bool filter_out(const std::set<std::string>& filters) override;
public:
~OpRequest() override {
request->put();
}
bool check_send_map = true; ///< true until we check if sender needs a map
epoch_t sent_epoch = 0; ///< client's map epoch
epoch_t min_epoch = 0; ///< min epoch needed to handle this msg
bool hitset_inserted;
jspan osd_parent_span;
template<class T>
const T* get_req() const { return static_cast<const T*>(request); }
const Message *get_req() const { return request; }
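  // e.g. (illustrative, not from the original header): callers typically
  // downcast to the concrete message type they expect:
  //   const MOSDOp *m = op->get_req<MOSDOp>();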
Message *get_nonconst_req() { return request; }
entity_name_t get_source() {
if (request) {
return request->get_source();
} else {
return entity_name_t();
}
}
uint8_t state_flag() const {
return latest_flag_point;
}
std::string_view state_string() const override {
switch(latest_flag_point) {
case flag_queued_for_pg: return "queued for pg";
case flag_reached_pg: return "reached pg";
case flag_delayed: return last_event_detail;
case flag_started: return "started";
case flag_sub_op_sent: return "waiting for sub ops";
case flag_commit_sent: return "commit sent; apply or cleanup";
default: break;
}
return "no flag points reached";
}
static std::string get_state_string(uint8_t flag) {
std::string flag_point;
switch(flag) {
case flag_queued_for_pg:
flag_point = "queued for pg";
break;
case flag_reached_pg:
flag_point = "reached pg";
break;
case flag_delayed:
flag_point = "delayed";
break;
case flag_started:
flag_point = "started";
break;
case flag_sub_op_sent:
flag_point = "waiting for sub ops";
break;
case flag_commit_sent:
flag_point = "commit sent; apply or cleanup";
break;
}
return flag_point;
}
void mark_queued_for_pg() {
mark_flag_point(flag_queued_for_pg, "queued_for_pg");
}
void mark_reached_pg() {
mark_flag_point(flag_reached_pg, "reached_pg");
}
void mark_delayed(const char* s) {
mark_flag_point(flag_delayed, s);
}
void mark_started() {
mark_flag_point(flag_started, "started");
}
void mark_sub_op_sent(const std::string& s) {
mark_flag_point_string(flag_sub_op_sent, s);
}
void mark_commit_sent() {
mark_flag_point(flag_commit_sent, "commit_sent");
}
utime_t get_dequeued_time() const {
return dequeued_time;
}
void set_dequeued_time(utime_t deq_time) {
dequeued_time = deq_time;
}
osd_reqid_t get_reqid() const {
return reqid;
}
typedef boost::intrusive_ptr<OpRequest> Ref;
private:
void mark_flag_point(uint8_t flag, const char *s);
void mark_flag_point_string(uint8_t flag, const std::string& s);
};
typedef OpRequest::Ref OpRequestRef;
#endif /* OPREQUEST_H_ */
| 5,812 | 28.810256 | 82 | h |
null | ceph-main/src/osd/PGBackend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013,2014 Inktank Storage, Inc.
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PGBACKEND_H
#define PGBACKEND_H
#include "osd_types.h"
#include "common/WorkQueue.h"
#include "include/Context.h"
#include "os/ObjectStore.h"
#include "common/LogClient.h"
#include <string>
#include "PGTransaction.h"
#include "common/ostream_temp.h"
namespace Scrub {
class Store;
}
struct shard_info_wrapper;
struct inconsistent_obj_wrapper;
//forward declaration
class OSDMap;
class PGLog;
typedef std::shared_ptr<const OSDMap> OSDMapRef;
/**
* PGBackend
*
* PGBackend defines an interface for logic handling IO and
* replication on RADOS objects. The PGBackend implementation
* is responsible for:
*
* 1) Handling client operations
* 2) Handling object recovery
* 3) Handling object access
* 4) Handling scrub, deep-scrub, repair
*/
class PGBackend {
public:
CephContext* cct;
protected:
ObjectStore *store;
const coll_t coll;
ObjectStore::CollectionHandle &ch;
public:
/**
* Provides interfaces for PGBackend callbacks
*
* The intention is that the parent calls into the PGBackend
* implementation holding a lock and that the callbacks are
* called under the same locks.
*/
class Listener {
public:
/// Debugging
virtual DoutPrefixProvider *get_dpp() = 0;
/// Recovery
/**
* Called with the transaction recovering oid
*/
virtual void on_local_recover(
const hobject_t &oid,
const ObjectRecoveryInfo &recovery_info,
ObjectContextRef obc,
bool is_delete,
ObjectStore::Transaction *t
) = 0;
/**
* Called when transaction recovering oid is durable and
* applied on all replicas
*/
virtual void on_global_recover(
const hobject_t &oid,
const object_stat_sum_t &stat_diff,
bool is_delete
) = 0;
/**
* Called when peer is recovered
*/
virtual void on_peer_recover(
pg_shard_t peer,
const hobject_t &oid,
const ObjectRecoveryInfo &recovery_info
) = 0;
virtual void begin_peer_recover(
pg_shard_t peer,
const hobject_t oid) = 0;
virtual void apply_stats(
const hobject_t &soid,
const object_stat_sum_t &delta_stats) = 0;
/**
* Called when a read from a std::set of replicas/primary fails
*/
virtual void on_failed_pull(
const std::set<pg_shard_t> &from,
const hobject_t &soid,
const eversion_t &v
) = 0;
/**
* Called when a pull on soid cannot be completed due to
* down peers
*/
virtual void cancel_pull(
const hobject_t &soid) = 0;
/**
* Called to remove an object.
*/
virtual void remove_missing_object(
const hobject_t &oid,
eversion_t v,
Context *on_complete) = 0;
/**
* Bless a context
*
* Wraps a context in whatever outer layers the parent usually
* uses to call into the PGBackend
*/
virtual Context *bless_context(Context *c) = 0;
virtual GenContext<ThreadPool::TPHandle&> *bless_gencontext(
GenContext<ThreadPool::TPHandle&> *c) = 0;
virtual GenContext<ThreadPool::TPHandle&> *bless_unlocked_gencontext(
GenContext<ThreadPool::TPHandle&> *c) = 0;
virtual void send_message(int to_osd, Message *m) = 0;
virtual void queue_transaction(
ObjectStore::Transaction&& t,
OpRequestRef op = OpRequestRef()
) = 0;
virtual void queue_transactions(
std::vector<ObjectStore::Transaction>& tls,
OpRequestRef op = OpRequestRef()
) = 0;
virtual epoch_t get_interval_start_epoch() const = 0;
virtual epoch_t get_last_peering_reset_epoch() const = 0;
virtual const std::set<pg_shard_t> &get_acting_recovery_backfill_shards() const = 0;
virtual const std::set<pg_shard_t> &get_acting_shards() const = 0;
virtual const std::set<pg_shard_t> &get_backfill_shards() const = 0;
virtual std::ostream& gen_dbg_prefix(std::ostream& out) const = 0;
virtual const std::map<hobject_t, std::set<pg_shard_t>> &get_missing_loc_shards()
const = 0;
virtual const pg_missing_tracker_t &get_local_missing() const = 0;
virtual void add_local_next_event(const pg_log_entry_t& e) = 0;
virtual const std::map<pg_shard_t, pg_missing_t> &get_shard_missing()
const = 0;
virtual const pg_missing_const_i * maybe_get_shard_missing(
pg_shard_t peer) const {
if (peer == primary_shard()) {
return &get_local_missing();
} else {
std::map<pg_shard_t, pg_missing_t>::const_iterator i =
get_shard_missing().find(peer);
if (i == get_shard_missing().end()) {
return nullptr;
} else {
return &(i->second);
}
}
}
virtual const pg_missing_const_i &get_shard_missing(pg_shard_t peer) const {
auto m = maybe_get_shard_missing(peer);
ceph_assert(m);
return *m;
}
virtual const std::map<pg_shard_t, pg_info_t> &get_shard_info() const = 0;
virtual const pg_info_t &get_shard_info(pg_shard_t peer) const {
if (peer == primary_shard()) {
return get_info();
} else {
std::map<pg_shard_t, pg_info_t>::const_iterator i =
get_shard_info().find(peer);
ceph_assert(i != get_shard_info().end());
return i->second;
}
}
virtual const PGLog &get_log() const = 0;
virtual bool pgb_is_primary() const = 0;
virtual const OSDMapRef& pgb_get_osdmap() const = 0;
virtual epoch_t pgb_get_osdmap_epoch() const = 0;
virtual const pg_info_t &get_info() const = 0;
virtual const pg_pool_t &get_pool() const = 0;
virtual ObjectContextRef get_obc(
const hobject_t &hoid,
const std::map<std::string, ceph::buffer::list, std::less<>> &attrs) = 0;
virtual bool try_lock_for_read(
const hobject_t &hoid,
ObcLockManager &manager) = 0;
virtual void release_locks(ObcLockManager &manager) = 0;
virtual void op_applied(
const eversion_t &applied_version) = 0;
virtual bool should_send_op(
pg_shard_t peer,
const hobject_t &hoid) = 0;
virtual bool pg_is_undersized() const = 0;
virtual bool pg_is_repair() const = 0;
virtual void log_operation(
std::vector<pg_log_entry_t>&& logv,
const std::optional<pg_hit_set_history_t> &hset_history,
const eversion_t &trim_to,
const eversion_t &roll_forward_to,
const eversion_t &min_last_complete_ondisk,
bool transaction_applied,
ObjectStore::Transaction &t,
bool async = false) = 0;
virtual void pgb_set_object_snap_mapping(
const hobject_t &soid,
const std::set<snapid_t> &snaps,
ObjectStore::Transaction *t) = 0;
virtual void pgb_clear_object_snap_mapping(
const hobject_t &soid,
ObjectStore::Transaction *t) = 0;
virtual void update_peer_last_complete_ondisk(
pg_shard_t fromosd,
eversion_t lcod) = 0;
virtual void update_last_complete_ondisk(
eversion_t lcod) = 0;
virtual void update_stats(
const pg_stat_t &stat) = 0;
virtual void schedule_recovery_work(
GenContext<ThreadPool::TPHandle&> *c,
uint64_t cost) = 0;
virtual pg_shard_t whoami_shard() const = 0;
int whoami() const {
return whoami_shard().osd;
}
spg_t whoami_spg_t() const {
return get_info().pgid;
}
virtual spg_t primary_spg_t() const = 0;
virtual pg_shard_t primary_shard() const = 0;
virtual uint64_t min_peer_features() const = 0;
virtual uint64_t min_upacting_features() const = 0;
virtual hobject_t get_temp_recovery_object(const hobject_t& target,
eversion_t version) = 0;
virtual void send_message_osd_cluster(
int peer, Message *m, epoch_t from_epoch) = 0;
virtual void send_message_osd_cluster(
std::vector<std::pair<int, Message*>>& messages, epoch_t from_epoch) = 0;
virtual void send_message_osd_cluster(
MessageRef, Connection *con) = 0;
virtual void send_message_osd_cluster(
Message *m, const ConnectionRef& con) = 0;
virtual ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) = 0;
virtual entity_name_t get_cluster_msgr_name() = 0;
virtual PerfCounters *get_logger() = 0;
virtual ceph_tid_t get_tid() = 0;
virtual OstreamTemp clog_error() = 0;
virtual OstreamTemp clog_warn() = 0;
virtual bool check_failsafe_full() = 0;
virtual void inc_osd_stat_repaired() = 0;
virtual bool pg_is_remote_backfilling() = 0;
virtual void pg_add_local_num_bytes(int64_t num_bytes) = 0;
virtual void pg_sub_local_num_bytes(int64_t num_bytes) = 0;
virtual void pg_add_num_bytes(int64_t num_bytes) = 0;
virtual void pg_sub_num_bytes(int64_t num_bytes) = 0;
virtual bool maybe_preempt_replica_scrub(const hobject_t& oid) = 0;
virtual ~Listener() {}
};
Listener *parent;
Listener *get_parent() const { return parent; }
PGBackend(CephContext* cct, Listener *l, ObjectStore *store, const coll_t &coll,
ObjectStore::CollectionHandle &ch) :
cct(cct),
store(store),
coll(coll),
ch(ch),
parent(l) {}
bool is_primary() const { return get_parent()->pgb_is_primary(); }
const OSDMapRef& get_osdmap() const { return get_parent()->pgb_get_osdmap(); }
epoch_t get_osdmap_epoch() const { return get_parent()->pgb_get_osdmap_epoch(); }
const pg_info_t &get_info() { return get_parent()->get_info(); }
std::ostream& gen_prefix(std::ostream& out) const {
return parent->gen_dbg_prefix(out);
}
/**
* RecoveryHandle
*
   * We may want to recover multiple objects in the same set of
* messages. RecoveryHandle is an interface for the opaque
* object used by the implementation to store the details of
* the pending recovery operations.
*/
struct RecoveryHandle {
bool cache_dont_need;
std::map<pg_shard_t, std::vector<std::pair<hobject_t, eversion_t> > > deletes;
RecoveryHandle(): cache_dont_need(false) {}
virtual ~RecoveryHandle() {}
};
/// Get a fresh recovery operation
virtual RecoveryHandle *open_recovery_op() = 0;
/// run_recovery_op: finish the operation represented by h
virtual void run_recovery_op(
RecoveryHandle *h, ///< [in] op to finish
int priority ///< [in] msg priority
) = 0;
void recover_delete_object(const hobject_t &oid, eversion_t v,
RecoveryHandle *h);
void send_recovery_deletes(int prio,
const std::map<pg_shard_t, std::vector<std::pair<hobject_t, eversion_t> > > &deletes);
/**
* recover_object
*
* Triggers a recovery operation on the specified hobject_t
* onreadable must be called before onwriteable
*
* On each replica (primary included), get_parent()->on_not_missing()
* must be called when the transaction finalizing the recovery
* is queued. Similarly, get_parent()->on_readable() must be called
* when the transaction is applied in the backing store.
*
* get_parent()->on_not_degraded() should be called on the primary
* when writes can resume on the object.
*
* obc may be NULL if the primary lacks the object.
*
* head may be NULL only if the head/snapdir is missing
*
   * @param missing [in] set of info, missing pairs for queried nodes
* @param overlaps [in] mapping of object to file offset overlaps
*/
virtual int recover_object(
const hobject_t &hoid, ///< [in] object to recover
eversion_t v, ///< [in] version to recover
ObjectContextRef head, ///< [in] context of the head/snapdir object
ObjectContextRef obc, ///< [in] context of the object
RecoveryHandle *h ///< [in,out] handle to attach recovery op to
) = 0;
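  // Illustrative recovery-op lifecycle (added for clarity, not part of the
  // original header); `backend`, `hoid`, `v`, `head_obc`, `obc` and `prio`
  // stand for values the caller already has:
  //
  //   PGBackend::RecoveryHandle *h = backend->open_recovery_op();
  //   backend->recover_object(hoid, v, head_obc, obc, h);
  //   backend->run_recovery_op(h, prio);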
/**
* true if PGBackend can handle this message while inactive
*
* If it returns true, handle_message *must* also return true
*/
virtual bool can_handle_while_inactive(OpRequestRef op) = 0;
/// gives PGBackend a crack at an incoming message
bool handle_message(
OpRequestRef op ///< [in] message received
); ///< @return true if the message was handled
/// the variant of handle_message that is overridden by child classes
virtual bool _handle_message(OpRequestRef op) = 0;
virtual void check_recovery_sources(const OSDMapRef& osdmap) = 0;
/**
* clean up any temporary on-disk state due to a pg interval change
*/
void on_change_cleanup(ObjectStore::Transaction *t);
/**
* implementation should clear itself, contexts blessed prior to on_change
* won't be called after on_change()
*/
virtual void on_change() = 0;
virtual void clear_recovery_state() = 0;
virtual IsPGRecoverablePredicate *get_is_recoverable_predicate() const = 0;
virtual IsPGReadablePredicate *get_is_readable_predicate() const = 0;
virtual int get_ec_data_chunk_count() const { return 0; };
virtual int get_ec_stripe_chunk_size() const { return 0; };
virtual void dump_recovery_info(ceph::Formatter *f) const = 0;
private:
std::set<hobject_t> temp_contents;
public:
// Track contents of temp collection, clear on reset
void add_temp_obj(const hobject_t &oid) {
temp_contents.insert(oid);
}
void add_temp_objs(const std::set<hobject_t> &oids) {
temp_contents.insert(oids.begin(), oids.end());
}
void clear_temp_obj(const hobject_t &oid) {
temp_contents.erase(oid);
}
void clear_temp_objs(const std::set<hobject_t> &oids) {
for (std::set<hobject_t>::const_iterator i = oids.begin();
i != oids.end();
++i) {
temp_contents.erase(*i);
}
}
virtual ~PGBackend() {}
/// execute implementation specific transaction
virtual void submit_transaction(
const hobject_t &hoid, ///< [in] object
const object_stat_sum_t &delta_stats,///< [in] stat change
const eversion_t &at_version, ///< [in] version
PGTransactionUPtr &&t, ///< [in] trans to execute (move)
const eversion_t &trim_to, ///< [in] trim log to here
const eversion_t &min_last_complete_ondisk, ///< [in] lower bound on
/// committed version
std::vector<pg_log_entry_t>&& log_entries, ///< [in] log entries for t
/// [in] hitset history (if updated with this transaction)
std::optional<pg_hit_set_history_t> &hset_history,
Context *on_all_commit, ///< [in] called when all commit
ceph_tid_t tid, ///< [in] tid
osd_reqid_t reqid, ///< [in] reqid
OpRequestRef op ///< [in] op
) = 0;
/// submit callback to be called in order with pending writes
virtual void call_write_ordered(std::function<void(void)> &&cb) = 0;
void try_stash(
const hobject_t &hoid,
version_t v,
ObjectStore::Transaction *t);
void rollback(
const pg_log_entry_t &entry,
ObjectStore::Transaction *t);
friend class LRBTrimmer;
void rollforward(
const pg_log_entry_t &entry,
ObjectStore::Transaction *t);
void trim(
const pg_log_entry_t &entry,
ObjectStore::Transaction *t);
void remove(
const hobject_t &hoid,
ObjectStore::Transaction *t);
protected:
void handle_recovery_delete(OpRequestRef op);
void handle_recovery_delete_reply(OpRequestRef op);
/// Reapply old attributes
void rollback_setattrs(
const hobject_t &hoid,
std::map<std::string, std::optional<ceph::buffer::list> > &old_attrs,
ObjectStore::Transaction *t);
/// Truncate object to rollback append
virtual void rollback_append(
const hobject_t &hoid,
uint64_t old_size,
ObjectStore::Transaction *t);
/// Unstash object to rollback stash
void rollback_stash(
const hobject_t &hoid,
version_t old_version,
ObjectStore::Transaction *t);
/// Unstash object to rollback stash
void rollback_try_stash(
const hobject_t &hoid,
version_t old_version,
ObjectStore::Transaction *t);
/// Delete object to rollback create
void rollback_create(
const hobject_t &hoid,
ObjectStore::Transaction *t) {
remove(hoid, t);
}
/// Clone the extents back into place
void rollback_extents(
version_t gen,
const std::vector<std::pair<uint64_t, uint64_t> > &extents,
const hobject_t &hoid,
ObjectStore::Transaction *t);
public:
/// Trim object stashed at version
void trim_rollback_object(
const hobject_t &hoid,
version_t gen,
ObjectStore::Transaction *t);
  /// List objects in collection
int objects_list_partial(
const hobject_t &begin,
int min,
int max,
std::vector<hobject_t> *ls,
hobject_t *next);
int objects_list_range(
const hobject_t &start,
const hobject_t &end,
std::vector<hobject_t> *ls,
std::vector<ghobject_t> *gen_obs=0);
int objects_get_attr(
const hobject_t &hoid,
const std::string &attr,
ceph::buffer::list *out);
virtual int objects_get_attrs(
const hobject_t &hoid,
std::map<std::string, ceph::buffer::list, std::less<>> *out);
virtual int objects_read_sync(
const hobject_t &hoid,
uint64_t off,
uint64_t len,
uint32_t op_flags,
ceph::buffer::list *bl) = 0;
virtual int objects_readv_sync(
const hobject_t &hoid,
std::map<uint64_t, uint64_t>&& m,
uint32_t op_flags,
ceph::buffer::list *bl) {
return -EOPNOTSUPP;
}
virtual void objects_read_async(
const hobject_t &hoid,
const std::list<std::pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
std::pair<ceph::buffer::list*, Context*> > > &to_read,
Context *on_complete, bool fast_read = false) = 0;
virtual bool auto_repair_supported() const = 0;
int be_scan_list(
ScrubMap &map,
ScrubMapBuilder &pos);
virtual uint64_t be_get_ondisk_size(
uint64_t logical_size) const = 0;
virtual int be_deep_scrub(
const hobject_t &oid,
ScrubMap &map,
ScrubMapBuilder &pos,
ScrubMap::object &o) = 0;
static PGBackend *build_pg_backend(
const pg_pool_t &pool,
const std::map<std::string,std::string>& profile,
Listener *l,
coll_t coll,
ObjectStore::CollectionHandle &ch,
ObjectStore *store,
CephContext *cct);
};
#endif
| 19,053 | 30.338816 | 95 | h |
null | ceph-main/src/osd/PGPeeringEvent.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/statechart/event.hpp>
#include "osd/osd_types.h"
class MOSDPGLog;
/// what we need to instantiate a pg
struct PGCreateInfo {
spg_t pgid;
epoch_t epoch = 0;
pg_history_t history;
PastIntervals past_intervals;
bool by_mon;
PGCreateInfo(spg_t p, epoch_t e,
const pg_history_t& h,
const PastIntervals& pi,
bool mon)
: pgid(p), epoch(e), history(h), past_intervals(pi), by_mon(mon) {}
};
class PGPeeringEvent {
epoch_t epoch_sent;
epoch_t epoch_requested;
std::string desc;
public:
boost::intrusive_ptr< const boost::statechart::event_base > evt;
bool requires_pg;
std::unique_ptr<PGCreateInfo> create_info;
MEMPOOL_CLASS_HELPERS();
template <class T>
PGPeeringEvent(
epoch_t epoch_sent,
epoch_t epoch_requested,
const T &evt_,
bool req = true,
PGCreateInfo *ci = 0)
: epoch_sent(epoch_sent),
epoch_requested(epoch_requested),
evt(evt_.intrusive_from_this()),
requires_pg(req),
create_info(ci) {
std::stringstream out;
out << "epoch_sent: " << epoch_sent
<< " epoch_requested: " << epoch_requested << " ";
evt_.print(&out);
if (create_info) {
out << " +create_info";
}
desc = out.str();
}
epoch_t get_epoch_sent() const {
return epoch_sent;
}
epoch_t get_epoch_requested() const {
return epoch_requested;
}
const boost::statechart::event_base &get_event() const {
return *evt;
}
const std::string& get_desc() const {
return desc;
}
};
typedef std::shared_ptr<PGPeeringEvent> PGPeeringEventRef;
typedef std::unique_ptr<PGPeeringEvent> PGPeeringEventURef;
struct MInfoRec : boost::statechart::event< MInfoRec > {
pg_shard_t from;
pg_info_t info;
epoch_t msg_epoch;
std::optional<pg_lease_t> lease;
std::optional<pg_lease_ack_t> lease_ack;
MInfoRec(pg_shard_t from, const pg_info_t &info, epoch_t msg_epoch,
std::optional<pg_lease_t> l = {},
std::optional<pg_lease_ack_t> la = {})
: from(from), info(info), msg_epoch(msg_epoch),
lease(l), lease_ack(la) {}
void print(std::ostream *out) const {
*out << "MInfoRec from " << from << " info: " << info;
if (lease) {
*out << " " << *lease;
}
if (lease_ack) {
*out << " " << *lease_ack;
}
}
};
struct MLogRec : boost::statechart::event< MLogRec > {
pg_shard_t from;
boost::intrusive_ptr<MOSDPGLog> msg;
MLogRec(pg_shard_t from, MOSDPGLog *msg);
void print(std::ostream *out) const;
};
struct MNotifyRec : boost::statechart::event< MNotifyRec > {
spg_t pgid;
pg_shard_t from;
pg_notify_t notify;
uint64_t features;
MNotifyRec(spg_t p, pg_shard_t from, const pg_notify_t ¬ify, uint64_t f)
: pgid(p), from(from), notify(notify), features(f) {}
void print(std::ostream *out) const {
*out << "MNotifyRec " << pgid << " from " << from << " notify: " << notify
<< " features: 0x" << std::hex << features << std::dec;
}
};
struct MQuery : boost::statechart::event< MQuery > {
spg_t pgid;
pg_shard_t from;
pg_query_t query;
epoch_t query_epoch;
MQuery(spg_t p, pg_shard_t from, const pg_query_t &query, epoch_t query_epoch)
: pgid(p), from(from), query(query), query_epoch(query_epoch) {}
void print(std::ostream *out) const {
*out << "MQuery " << pgid << " from " << from
<< " query_epoch " << query_epoch
<< " query: " << query;
}
};
struct MTrim : boost::statechart::event<MTrim> {
epoch_t epoch;
int from;
shard_id_t shard;
eversion_t trim_to;
MTrim(epoch_t epoch, int from, shard_id_t shard, eversion_t trim_to)
: epoch(epoch), from(from), shard(shard), trim_to(trim_to) {}
void print(std::ostream *out) const {
*out << "MTrim epoch " << epoch << " from " << from << " shard " << shard
<< " trim_to " << trim_to;
}
};
struct MLease : boost::statechart::event<MLease> {
epoch_t epoch;
int from;
pg_lease_t lease;
MLease(epoch_t epoch, int from, pg_lease_t l)
: epoch(epoch), from(from), lease(l) {}
void print(std::ostream *out) const {
*out << "MLease epoch " << epoch << " from osd." << from << " " << lease;
}
};
struct MLeaseAck : boost::statechart::event<MLeaseAck> {
epoch_t epoch;
int from;
pg_lease_ack_t lease_ack;
MLeaseAck(epoch_t epoch, int from, pg_lease_ack_t l)
: epoch(epoch), from(from), lease_ack(l) {}
void print(std::ostream *out) const {
*out << "MLeaseAck epoch " << epoch << " from osd." << from
<< " " << lease_ack;
}
};
struct RequestBackfillPrio : boost::statechart::event< RequestBackfillPrio > {
unsigned priority;
int64_t primary_num_bytes;
int64_t local_num_bytes;
explicit RequestBackfillPrio(unsigned prio, int64_t pbytes, int64_t lbytes) :
boost::statechart::event< RequestBackfillPrio >(),
priority(prio), primary_num_bytes(pbytes), local_num_bytes(lbytes) {}
void print(std::ostream *out) const {
*out << "RequestBackfillPrio: priority " << priority
<< " primary bytes " << primary_num_bytes
<< " local bytes " << local_num_bytes;
}
};
struct RequestRecoveryPrio : boost::statechart::event< RequestRecoveryPrio > {
unsigned priority;
explicit RequestRecoveryPrio(unsigned prio) :
boost::statechart::event< RequestRecoveryPrio >(),
priority(prio) {}
void print(std::ostream *out) const {
*out << "RequestRecoveryPrio: priority " << priority;
}
};
#define TrivialEvent(T) struct T : boost::statechart::event< T > { \
T() : boost::statechart::event< T >() {} \
void print(std::ostream *out) const { \
*out << #T; \
} \
};
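// For illustration (added for clarity, not part of the original header),
// TrivialEvent(RecoveryDone) expands to roughly:
//
//   struct RecoveryDone : boost::statechart::event< RecoveryDone > {
//     RecoveryDone() : boost::statechart::event< RecoveryDone >() {}
//     void print(std::ostream *out) const { *out << "RecoveryDone"; }
//   };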
TrivialEvent(NullEvt)
TrivialEvent(RemoteBackfillReserved)
TrivialEvent(RemoteReservationRejectedTooFull)
TrivialEvent(RemoteReservationRevokedTooFull)
TrivialEvent(RemoteReservationRevoked)
TrivialEvent(RemoteReservationCanceled)
TrivialEvent(RemoteRecoveryReserved)
TrivialEvent(RecoveryDone)
struct DeferRecovery : boost::statechart::event<DeferRecovery> {
float delay;
explicit DeferRecovery(float delay) : delay(delay) {}
void print(std::ostream *out) const {
*out << "DeferRecovery: delay " << delay;
}
};
struct DeferBackfill : boost::statechart::event<DeferBackfill> {
float delay;
explicit DeferBackfill(float delay) : delay(delay) {}
void print(std::ostream *out) const {
*out << "DeferBackfill: delay " << delay;
}
};
TrivialEvent(RenewLease)
| 6,484 | 28.343891 | 80 | h |
null | ceph-main/src/osd/PGStateUtils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/utime.h"
#include "common/Formatter.h"
#include <stack>
#include <vector>
#include <boost/circular_buffer.hpp>
class PGStateHistory;
struct EpochSource {
virtual epoch_t get_osdmap_epoch() const = 0;
virtual ~EpochSource() {}
};
struct NamedState {
PGStateHistory *pgsh;
const char *state_name;
utime_t enter_time;
const char *get_state_name() { return state_name; }
NamedState(
PGStateHistory *pgsh,
const char *state_name_);
virtual ~NamedState();
};
using state_history_entry = std::tuple<utime_t, utime_t, const char*>;
using embedded_state = std::pair<utime_t, const char*>;
struct PGStateInstance {
// Time spent in pg states
void setepoch(const epoch_t current_epoch) {
this_epoch = current_epoch;
}
void enter_state(const utime_t entime, const char* state) {
embedded_states.push(std::make_pair(entime, state));
}
void exit_state(const utime_t extime) {
embedded_state this_state = embedded_states.top();
state_history.push_back(state_history_entry{
this_state.first, extime, this_state.second});
embedded_states.pop();
}
bool empty() const {
return embedded_states.empty();
}
epoch_t this_epoch;
std::vector<state_history_entry> state_history;
std::stack<embedded_state> embedded_states;
};
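// Note (added for clarity, not in the original header): each enter_state()
// pushes a (time, name) pair onto embedded_states; the matching exit_state()
// pops it and appends an (enter_time, exit_time, name) tuple to
// state_history, so nested states unwind in LIFO order.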
class PGStateHistory {
public:
PGStateHistory(const EpochSource &es) : buffer(10), es(es) {}
void enter(const utime_t entime, const char* state);
void exit(const char* state);
void reset() {
buffer.push_back(std::move(pi));
pi = nullptr;
}
void dump(ceph::Formatter* f) const;
const char *get_current_state() const {
if (pi == nullptr) return "unknown";
return std::get<1>(pi->embedded_states.top());
}
private:
std::unique_ptr<PGStateInstance> pi;
boost::circular_buffer<std::unique_ptr<PGStateInstance>> buffer;
const EpochSource &es;
};
| 2,007 | 22.348837 | 70 | h |
null | ceph-main/src/osd/PGTransaction.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PGTRANSACTION_H
#define PGTRANSACTION_H
#include <map>
#include <memory>
#include <optional>
#include "common/hobject.h"
#include "osd/osd_types.h"
#include "osd/osd_internal_types.h"
#include "common/interval_map.h"
#include "common/inline_variant.h"
/**
* This class represents transactions which can be submitted to
* a PGBackend. For expediency, there are some constraints on
* the operations submitted:
* 1) Rename sources may only be referenced prior to the rename
* operation to the destination.
* 2) The graph formed by edges of source->destination for clones
* (Create) and Renames must be acyclic.
* 3) clone_range sources must not be modified by the same
* transaction
*/
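/* Illustrative sketch (added for clarity, not part of the original header):
 * a sequence that satisfies the constraints above -- the rename source is a
 * temp object and is never referenced after the rename.  `temp_oid`,
 * `final_oid` and `bl` stand for values the caller already has:
 *
 *   PGTransaction t;
 *   t.create(temp_oid);                      // temp_oid.is_temp() == true
 *   t.write(temp_oid, 0, bl.length(), bl);
 *   t.rename(final_oid, temp_oid);           // temp_oid not used after this
 */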
class PGTransaction {
public:
std::map<hobject_t, ObjectContextRef> obc_map;
class ObjectOperation {
public:
struct Init
{
struct None {};
struct Create {};
struct Clone {
hobject_t source;
};
struct Rename {
hobject_t source; // must be temp object
};
};
using InitType = boost::variant<
Init::None,
Init::Create,
Init::Clone,
Init::Rename>;
InitType init_type = Init::None();
bool delete_first = false;
/**
* is_none() && is_delete() indicates that we are deleting an
* object which already exists and not recreating it. delete_first means
* that the transaction logically removes the object.
* There are really 4 cases:
* 1) We are modifying an existing object (is_none() &&
* !is_delete())
* a) If it's an append, we just write into the log entry the old size
* b) If it's an actual overwrite, we save the old versions of the
* extents being overwritten and write those offsets into the log
* entry
* 2) We are removing and then recreating an object (!is_none() && is_delete())
* -- stash
* 3) We are removing an object (is_none() && is_delete()) -- stash
* 4) We are creating an object (!is_none() && !is_delete()) -- create (no
* stash)
*
* Create, Clone, Rename are the three ways we can recreate it.
* ECBackend transaction planning needs this context
* to figure out how to perform the transaction.
*/
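    /* Summary of the four cases above in terms of the stored fields rather
     * than the helper predicates (added for clarity, not part of the
     * original header):
     *
     *   init_type            delete_first   case
     *   None                 false          (1) modify an existing object
     *   Create/Clone/Rename  true           (2) remove, then recreate
     *   None                 true           (3) remove an existing object
     *   Create/Clone/Rename  false          (4) create a fresh object
     */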
bool deletes_first() const {
return delete_first;
}
bool is_delete() const {
return boost::get<Init::None>(&init_type) != nullptr && delete_first;
}
bool is_none() const {
return boost::get<Init::None>(&init_type) != nullptr && !delete_first;
}
bool is_fresh_object() const {
return boost::get<Init::None>(&init_type) == nullptr;
}
bool is_rename() const {
return boost::get<Init::Rename>(&init_type) != nullptr;
}
bool has_source(hobject_t *source = nullptr) const {
return match(
init_type,
[&](const Init::Clone &op) -> bool {
if (source)
*source = op.source;
return true;
},
[&](const Init::Rename &op) -> bool {
if (source)
*source = op.source;
return true;
},
[&](const Init::None &) -> bool { return false; },
[&](const Init::Create &) -> bool { return false; });
}
bool clear_omap = false;
/**
* truncate
* <lowest, last> ?
*
* truncate is represented as a pair because in the event of
* multiple truncates within a single transaction we need to
* remember the lowest truncate and the final object size
* (the last truncate). We also adjust the buffers map
* to account for truncates overriding previous writes */
std::optional<std::pair<uint64_t, uint64_t> > truncate = std::nullopt;
std::map<std::string, std::optional<ceph::buffer::list> > attr_updates;
enum class OmapUpdateType {Remove, Insert, RemoveRange};
std::vector<std::pair<OmapUpdateType, ceph::buffer::list> > omap_updates;
std::optional<ceph::buffer::list> omap_header;
/// (old, new) -- only valid with no truncate or buffer updates
std::optional<std::pair<std::set<snapid_t>, std::set<snapid_t>>> updated_snaps;
struct alloc_hint_t {
uint64_t expected_object_size;
uint64_t expected_write_size;
uint32_t flags;
};
std::optional<alloc_hint_t> alloc_hint;
struct BufferUpdate {
struct Write {
ceph::buffer::list buffer;
uint32_t fadvise_flags;
};
struct Zero {
uint64_t len;
};
struct CloneRange {
hobject_t from;
uint64_t offset;
uint64_t len;
};
};
using BufferUpdateType = boost::variant<
BufferUpdate::Write,
BufferUpdate::Zero,
BufferUpdate::CloneRange>;
private:
struct SplitMerger {
BufferUpdateType split(
uint64_t offset,
uint64_t len,
const BufferUpdateType &bu) const {
return match(
bu,
[&](const BufferUpdate::Write &w) -> BufferUpdateType {
ceph::buffer::list bl;
bl.substr_of(w.buffer, offset, len);
return BufferUpdate::Write{bl, w.fadvise_flags};
},
[&](const BufferUpdate::Zero &) -> BufferUpdateType {
return BufferUpdate::Zero{len};
},
[&](const BufferUpdate::CloneRange &c) -> BufferUpdateType {
return BufferUpdate::CloneRange{c.from, c.offset + offset, len};
});
}
uint64_t length(
const BufferUpdateType &left) const {
return match(
left,
[&](const BufferUpdate::Write &w) -> uint64_t {
return w.buffer.length();
},
[&](const BufferUpdate::Zero &z) -> uint64_t {
return z.len;
},
[&](const BufferUpdate::CloneRange &c) -> uint64_t {
return c.len;
});
}
bool can_merge(
const BufferUpdateType &left,
const BufferUpdateType &right) const {
return match(
left,
[&](const BufferUpdate::Write &w) -> bool {
auto r = boost::get<BufferUpdate::Write>(&right);
return r != nullptr && (w.fadvise_flags == r->fadvise_flags);
},
[&](const BufferUpdate::Zero &) -> bool {
auto r = boost::get<BufferUpdate::Zero>(&right);
return r != nullptr;
},
[&](const BufferUpdate::CloneRange &c) -> bool {
return false;
});
}
BufferUpdateType merge(
BufferUpdateType &&left,
BufferUpdateType &&right) const {
return match(
left,
[&](const BufferUpdate::Write &w) -> BufferUpdateType {
auto r = boost::get<BufferUpdate::Write>(&right);
ceph_assert(r && w.fadvise_flags == r->fadvise_flags);
ceph::buffer::list bl = w.buffer;
bl.append(r->buffer);
return BufferUpdate::Write{bl, w.fadvise_flags};
},
[&](const BufferUpdate::Zero &z) -> BufferUpdateType {
auto r = boost::get<BufferUpdate::Zero>(&right);
ceph_assert(r);
return BufferUpdate::Zero{z.len + r->len};
},
[&](const BufferUpdate::CloneRange &c) -> BufferUpdateType {
ceph_abort_msg("violates can_merge condition");
return left;
});
}
};
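  // Behaviour summary (added for clarity, not part of the original header):
  // adjacent Write updates with identical fadvise_flags merge into a single
  // Write whose buffers are concatenated, adjacent Zero runs coalesce, and
  // CloneRange entries never merge.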
public:
using buffer_update_type = interval_map<
uint64_t, BufferUpdateType, SplitMerger>;
buffer_update_type buffer_updates;
friend class PGTransaction;
};
std::map<hobject_t, ObjectOperation> op_map;
private:
ObjectOperation &get_object_op_for_modify(const hobject_t &hoid) {
auto &op = op_map[hoid];
ceph_assert(!op.is_delete());
return op;
}
ObjectOperation &get_object_op(const hobject_t &hoid) {
return op_map[hoid];
}
public:
void add_obc(
ObjectContextRef obc) {
ceph_assert(obc);
obc_map[obc->obs.oi.soid] = obc;
}
/// Sets up state for new object
void create(
const hobject_t &hoid
) {
auto &op = op_map[hoid];
ceph_assert(op.is_none() || op.is_delete());
op.init_type = ObjectOperation::Init::Create();
}
/// Sets up state for target cloned from source
void clone(
const hobject_t &target, ///< [in] obj to clone to
const hobject_t &source ///< [in] obj to clone from
) {
auto &op = op_map[target];
ceph_assert(op.is_none() || op.is_delete());
op.init_type = ObjectOperation::Init::Clone{source};
}
/// Sets up state for target renamed from source
void rename(
const hobject_t &target, ///< [in] to, must not exist, be non-temp
const hobject_t &source ///< [in] source (must be a temp object)
) {
ceph_assert(source.is_temp());
ceph_assert(!target.is_temp());
auto &op = op_map[target];
ceph_assert(op.is_none() || op.is_delete());
bool del_first = op.is_delete();
auto iter = op_map.find(source);
if (iter != op_map.end()) {
op = iter->second;
op_map.erase(iter);
op.delete_first = del_first;
}
op.init_type = ObjectOperation::Init::Rename{source};
}
/// Remove -- must not be called on rename target
void remove(
const hobject_t &hoid ///< [in] obj to remove
) {
auto &op = get_object_op_for_modify(hoid);
if (!op.is_fresh_object()) {
ceph_assert(!op.updated_snaps);
op = ObjectOperation();
op.delete_first = true;
} else {
ceph_assert(!op.is_rename());
op_map.erase(hoid); // make it a noop if it's a fresh object
}
}
void update_snaps(
const hobject_t &hoid, ///< [in] object for snaps
const std::set<snapid_t> &old_snaps,///< [in] old snaps value
const std::set<snapid_t> &new_snaps ///< [in] new snaps value
) {
auto &op = get_object_op(hoid);
ceph_assert(!op.updated_snaps);
ceph_assert(op.buffer_updates.empty());
ceph_assert(!op.truncate);
op.updated_snaps = make_pair(
old_snaps,
new_snaps);
}
/// Clears, truncates
void omap_clear(
const hobject_t &hoid ///< [in] object to clear omap
) {
auto &op = get_object_op_for_modify(hoid);
op.clear_omap = true;
op.omap_updates.clear();
op.omap_header = std::nullopt;
}
void truncate(
const hobject_t &hoid, ///< [in] object
uint64_t off ///< [in] offset to truncate to
) {
auto &op = get_object_op_for_modify(hoid);
ceph_assert(!op.updated_snaps);
op.buffer_updates.erase(
off,
std::numeric_limits<uint64_t>::max() - off);
if (!op.truncate || off < op.truncate->first) {
op.truncate = std::pair<uint64_t, uint64_t>(off, off);
} else {
op.truncate->second = off;
}
}
/// Attr ops
void setattrs(
const hobject_t &hoid, ///< [in] object to write
std::map<std::string, ceph::buffer::list, std::less<>> &attrs ///< [in] attrs, may be cleared
) {
auto &op = get_object_op_for_modify(hoid);
for (auto &[key, val]: attrs) {
auto& d = op.attr_updates[key];
d = val;
d->rebuild();
}
}
void setattr(
const hobject_t &hoid, ///< [in] object to write
const std::string &attrname, ///< [in] attr to write
ceph::buffer::list &bl ///< [in] val to write, may be claimed
) {
auto &op = get_object_op_for_modify(hoid);
auto& d = op.attr_updates[attrname];
d = bl;
d->rebuild();
}
void rmattr(
const hobject_t &hoid, ///< [in] object to write
const std::string &attrname ///< [in] attr to remove
) {
auto &op = get_object_op_for_modify(hoid);
op.attr_updates[attrname] = std::nullopt;
}
/// set alloc hint
void set_alloc_hint(
const hobject_t &hoid, ///< [in] object (must exist)
uint64_t expected_object_size, ///< [in]
uint64_t expected_write_size,
uint32_t flags
) {
auto &op = get_object_op_for_modify(hoid);
op.alloc_hint = ObjectOperation::alloc_hint_t{
expected_object_size, expected_write_size, flags};
}
/// Buffer updates
void write(
const hobject_t &hoid, ///< [in] object to write
uint64_t off, ///< [in] off at which to write
uint64_t len, ///< [in] len to write from bl
ceph::buffer::list &bl, ///< [in] bl to write will be claimed to len
uint32_t fadvise_flags = 0 ///< [in] fadvise hint
) {
auto &op = get_object_op_for_modify(hoid);
ceph_assert(!op.updated_snaps);
ceph_assert(len > 0);
ceph_assert(len == bl.length());
op.buffer_updates.insert(
off,
len,
ObjectOperation::BufferUpdate::Write{bl, fadvise_flags});
}
void clone_range(
const hobject_t &from, ///< [in] from
const hobject_t &to, ///< [in] to
uint64_t fromoff, ///< [in] offset
uint64_t len, ///< [in] len
uint64_t tooff ///< [in] offset
) {
auto &op = get_object_op_for_modify(to);
ceph_assert(!op.updated_snaps);
op.buffer_updates.insert(
tooff,
len,
ObjectOperation::BufferUpdate::CloneRange{from, fromoff, len});
}
void zero(
const hobject_t &hoid, ///< [in] object
uint64_t off, ///< [in] offset to start zeroing at
uint64_t len ///< [in] amount to zero
) {
auto &op = get_object_op_for_modify(hoid);
ceph_assert(!op.updated_snaps);
op.buffer_updates.insert(
off,
len,
ObjectOperation::BufferUpdate::Zero{len});
}
/// Omap updates
void omap_setkeys(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &keys_bl ///< [in] encoded map<string, ceph::buffer::list>
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_updates.emplace_back(
std::make_pair(
ObjectOperation::OmapUpdateType::Insert,
keys_bl));
}
void omap_setkeys(
const hobject_t &hoid, ///< [in] object to write
std::map<std::string, ceph::buffer::list> &keys ///< [in] omap keys, may be cleared
) {
using ceph::encode;
ceph::buffer::list bl;
encode(keys, bl);
omap_setkeys(hoid, bl);
}
void omap_rmkeys(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &keys_bl ///< [in] encode set<string>
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_updates.emplace_back(
std::make_pair(
ObjectOperation::OmapUpdateType::Remove,
keys_bl));
}
void omap_rmkeys(
const hobject_t &hoid, ///< [in] object to write
std::set<std::string> &keys ///< [in] omap keys, may be cleared
) {
using ceph::encode;
ceph::buffer::list bl;
encode(keys, bl);
omap_rmkeys(hoid, bl);
}
void omap_rmkeyrange(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &range_bl ///< [in] encode string[2]
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_updates.emplace_back(
std::make_pair(
ObjectOperation::OmapUpdateType::RemoveRange,
range_bl));
}
void omap_rmkeyrange(
const hobject_t &hoid, ///< [in] object to write
std::string& key_begin, ///< [in] first key in range
std::string& key_end ///< [in] first key past range, range is [first,last)
) {
ceph::buffer::list bl;
::encode(key_begin, bl);
::encode(key_end, bl);
omap_rmkeyrange(hoid, bl);
}
void omap_setheader(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &header ///< [in] header
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_header = header;
}
bool empty() const {
return op_map.empty();
}
uint64_t get_bytes_written() const {
uint64_t ret = 0;
for (auto &&i: op_map) {
for (auto &&j: i.second.buffer_updates) {
ret += j.get_len();
}
}
return ret;
}
void nop(
const hobject_t &hoid ///< [in] obj to which we are doing nothing
) {
get_object_op_for_modify(hoid);
}
/* Calls t() on all pair<hobject_t, ObjectOperation> & such that clone/rename
* sinks are always called before clone sources
*
* TODO: add a fast path for the single object case and possibly the single
* object clone from source case (make_writeable made a clone).
*
* This structure only requires that the source->sink graph be acyclic.
* This is much more general than is actually required by PrimaryLogPG.
* Only 4 flavors of multi-object transactions actually happen:
* 1) rename temp -> object for copyfrom
* 2) clone head -> clone, modify head for make_writeable on normal head write
* 3) clone clone -> head for rollback
* 4) 2 + 3
*
* We can bypass the below logic for single object transactions trivially
* (including case 1 above since temp doesn't show up again).
* For 2-3, we could add something ad-hoc to ensure that they happen in the
* right order, but it actually seems easier to just do the graph construction.
*/
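  /* Illustrative sketch (not from the original comment), using case 2 above
   * (head was cloned to `clone_oid`, then modified).  op_map holds entries
   * for both head and clone_oid, and clone_oid's source is head, so:
   *
   *   txn.safe_create_traverse([&](std::pair<const hobject_t,
   *                                          ObjectOperation> &p) {
   *     apply(p);  // placeholder callback: sees clone_oid (the sink)
   *                // before head (the source)
   *   });
   */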
template <typename T>
void safe_create_traverse(T &&t) {
std::map<hobject_t, std::list<hobject_t>> dgraph;
std::list<hobject_t> stack;
// Populate stack with roots, dgraph with edges
for (auto &&opair: op_map) {
hobject_t source;
if (opair.second.has_source(&source)) {
auto &l = dgraph[source];
if (l.empty() && !op_map.count(source)) {
/* Source oids not in op_map need to be added as roots
* (but only once!) */
stack.push_back(source);
}
l.push_back(opair.first);
} else {
stack.push_back(opair.first);
}
}
/* Why don't we need to worry about accessing the same node
* twice? dgraph nodes always have in-degree at most 1 because
* the inverse graph nodes (source->dest) can have out-degree
* at most 1 (only one possible source). We do a post-order
     * depth-first traversal here to ensure we call t() on children
* before parents.
*/
while (!stack.empty()) {
hobject_t &cur = stack.front();
auto diter = dgraph.find(cur);
if (diter == dgraph.end()) {
/* Leaf: pop and call t() */
auto opiter = op_map.find(cur);
if (opiter != op_map.end())
t(*opiter);
stack.pop_front();
} else {
/* Internal node: push children onto stack, remove edge,
* recurse. When this node is encountered again, it'll
* be a leaf */
ceph_assert(!diter->second.empty());
stack.splice(stack.begin(), diter->second);
dgraph.erase(diter);
}
}
}
};
using PGTransactionUPtr = std::unique_ptr<PGTransaction>;
#endif
| 18,554 | 29.822259 | 97 | h |
null | ceph-main/src/osd/ReplicatedBackend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef REPBACKEND_H
#define REPBACKEND_H
#include "PGBackend.h"
struct C_ReplicatedBackend_OnPullComplete;
class ReplicatedBackend : public PGBackend {
struct RPGHandle : public PGBackend::RecoveryHandle {
std::map<pg_shard_t, std::vector<PushOp> > pushes;
std::map<pg_shard_t, std::vector<PullOp> > pulls;
};
friend struct C_ReplicatedBackend_OnPullComplete;
public:
ReplicatedBackend(
PGBackend::Listener *pg,
const coll_t &coll,
ObjectStore::CollectionHandle &ch,
ObjectStore *store,
CephContext *cct);
/// @see PGBackend::open_recovery_op
RPGHandle *_open_recovery_op() {
return new RPGHandle();
}
PGBackend::RecoveryHandle *open_recovery_op() override {
return _open_recovery_op();
}
/// @see PGBackend::run_recovery_op
void run_recovery_op(
PGBackend::RecoveryHandle *h,
int priority) override;
/// @see PGBackend::recover_object
int recover_object(
const hobject_t &hoid,
eversion_t v,
ObjectContextRef head,
ObjectContextRef obc,
RecoveryHandle *h
) override;
void check_recovery_sources(const OSDMapRef& osdmap) override;
bool can_handle_while_inactive(OpRequestRef op) override;
/// @see PGBackend::handle_message
bool _handle_message(
OpRequestRef op
) override;
void on_change() override;
void clear_recovery_state() override;
class RPCRecPred : public IsPGRecoverablePredicate {
public:
bool operator()(const std::set<pg_shard_t> &have) const override {
return !have.empty();
}
};
IsPGRecoverablePredicate *get_is_recoverable_predicate() const override {
return new RPCRecPred;
}
class RPCReadPred : public IsPGReadablePredicate {
pg_shard_t whoami;
public:
explicit RPCReadPred(pg_shard_t whoami) : whoami(whoami) {}
bool operator()(const std::set<pg_shard_t> &have) const override {
return have.count(whoami);
}
};
IsPGReadablePredicate *get_is_readable_predicate() const override {
return new RPCReadPred(get_parent()->whoami_shard());
}
void dump_recovery_info(ceph::Formatter *f) const override {
{
f->open_array_section("pull_from_peer");
for (const auto& i : pull_from_peer) {
f->open_object_section("pulling_from");
f->dump_stream("pull_from") << i.first;
{
f->open_array_section("pulls");
for (const auto& j : i.second) {
f->open_object_section("pull_info");
ceph_assert(pulling.count(j));
pulling.find(j)->second.dump(f);
f->close_section();
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
{
f->open_array_section("pushing");
for(const auto& i : pushing) {
f->open_object_section("object");
f->dump_stream("pushing") << i.first;
{
f->open_array_section("pushing_to");
for (const auto& j : i.second) {
f->open_object_section("push_progress");
f->dump_stream("pushing_to") << j.first;
{
f->open_object_section("push_info");
j.second.dump(f);
f->close_section();
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
}
int objects_read_sync(
const hobject_t &hoid,
uint64_t off,
uint64_t len,
uint32_t op_flags,
ceph::buffer::list *bl) override;
int objects_readv_sync(
const hobject_t &hoid,
std::map<uint64_t, uint64_t>&& m,
uint32_t op_flags,
ceph::buffer::list *bl) override;
void objects_read_async(
const hobject_t &hoid,
const std::list<std::pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
std::pair<ceph::buffer::list*, Context*> > > &to_read,
Context *on_complete,
bool fast_read = false) override;
private:
// push
struct push_info_t {
ObjectRecoveryProgress recovery_progress;
ObjectRecoveryInfo recovery_info;
ObjectContextRef obc;
object_stat_sum_t stat;
ObcLockManager lock_manager;
void dump(ceph::Formatter *f) const {
{
f->open_object_section("recovery_progress");
recovery_progress.dump(f);
f->close_section();
}
{
f->open_object_section("recovery_info");
recovery_info.dump(f);
f->close_section();
}
}
};
std::map<hobject_t, std::map<pg_shard_t, push_info_t>> pushing;
// pull
struct pull_info_t {
pg_shard_t from;
hobject_t soid;
ObjectRecoveryProgress recovery_progress;
ObjectRecoveryInfo recovery_info;
ObjectContextRef head_ctx;
ObjectContextRef obc;
object_stat_sum_t stat;
bool cache_dont_need;
ObcLockManager lock_manager;
void dump(ceph::Formatter *f) const {
{
f->open_object_section("recovery_progress");
recovery_progress.dump(f);
f->close_section();
}
{
f->open_object_section("recovery_info");
recovery_info.dump(f);
f->close_section();
}
}
bool is_complete() const {
return recovery_progress.is_complete(recovery_info);
}
};
std::map<hobject_t, pull_info_t> pulling;
// Reverse mapping from osd peer to objects being pulled from that peer
std::map<pg_shard_t, std::set<hobject_t> > pull_from_peer;
void clear_pull(
std::map<hobject_t, pull_info_t>::iterator piter,
bool clear_pull_from_peer = true);
void clear_pull_from(
std::map<hobject_t, pull_info_t>::iterator piter);
void _do_push(OpRequestRef op);
void _do_pull_response(OpRequestRef op);
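  // Note: a PushOp arriving at the primary is the reply to one of its own
  // pulls; on a replica it is a genuine push of recovery data from the
  // primary, hence the dispatch below.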
void do_push(OpRequestRef op) {
if (is_primary()) {
_do_pull_response(op);
} else {
_do_push(op);
}
}
void do_pull(OpRequestRef op);
void do_push_reply(OpRequestRef op);
bool handle_push_reply(pg_shard_t peer, const PushReplyOp &op, PushOp *reply);
void handle_pull(pg_shard_t peer, PullOp &op, PushOp *reply);
struct pull_complete_info {
hobject_t hoid;
object_stat_sum_t stat;
};
bool handle_pull_response(
pg_shard_t from, const PushOp &op, PullOp *response,
std::list<pull_complete_info> *to_continue,
ObjectStore::Transaction *t);
void handle_push(pg_shard_t from, const PushOp &op, PushReplyOp *response,
ObjectStore::Transaction *t, bool is_repair);
static void trim_pushed_data(const interval_set<uint64_t> ©_subset,
const interval_set<uint64_t> &intervals_received,
ceph::buffer::list data_received,
interval_set<uint64_t> *intervals_usable,
ceph::buffer::list *data_usable);
void _failed_pull(pg_shard_t from, const hobject_t &soid);
void send_pushes(int prio, std::map<pg_shard_t, std::vector<PushOp> > &pushes);
void prep_push_op_blank(const hobject_t& soid, PushOp *op);
void send_pulls(
int priority,
std::map<pg_shard_t, std::vector<PullOp> > &pulls);
int build_push_op(const ObjectRecoveryInfo &recovery_info,
const ObjectRecoveryProgress &progress,
ObjectRecoveryProgress *out_progress,
PushOp *out_op,
object_stat_sum_t *stat = 0,
bool cache_dont_need = true);
void submit_push_data(const ObjectRecoveryInfo &recovery_info,
bool first,
bool complete,
bool clear_omap,
bool cache_dont_need,
interval_set<uint64_t> &data_zeros,
const interval_set<uint64_t> &intervals_included,
ceph::buffer::list data_included,
ceph::buffer::list omap_header,
const std::map<std::string, ceph::buffer::list, std::less<>> &attrs,
const std::map<std::string, ceph::buffer::list> &omap_entries,
ObjectStore::Transaction *t);
void submit_push_complete(const ObjectRecoveryInfo &recovery_info,
ObjectStore::Transaction *t);
void calc_clone_subsets(
SnapSet& snapset, const hobject_t& poid, const pg_missing_t& missing,
const hobject_t &last_backfill,
interval_set<uint64_t>& data_subset,
std::map<hobject_t, interval_set<uint64_t>>& clone_subsets,
ObcLockManager &lock_manager);
void prepare_pull(
eversion_t v,
const hobject_t& soid,
ObjectContextRef headctx,
RPGHandle *h);
int start_pushes(
const hobject_t &soid,
ObjectContextRef obj,
RPGHandle *h);
int prep_push_to_replica(
ObjectContextRef obc, const hobject_t& soid, pg_shard_t peer,
PushOp *pop, bool cache_dont_need = true);
int prep_push(
ObjectContextRef obc,
const hobject_t& oid, pg_shard_t dest,
PushOp *op,
bool cache_dont_need);
int prep_push(
ObjectContextRef obc,
const hobject_t& soid, pg_shard_t peer,
eversion_t version,
interval_set<uint64_t> &data_subset,
std::map<hobject_t, interval_set<uint64_t>>& clone_subsets,
PushOp *op,
bool cache,
ObcLockManager &&lock_manager);
void calc_head_subsets(
ObjectContextRef obc, SnapSet& snapset, const hobject_t& head,
const pg_missing_t& missing,
const hobject_t &last_backfill,
interval_set<uint64_t>& data_subset,
std::map<hobject_t, interval_set<uint64_t>>& clone_subsets,
ObcLockManager &lock_manager);
ObjectRecoveryInfo recalc_subsets(
const ObjectRecoveryInfo& recovery_info,
SnapSetContext *ssc,
ObcLockManager &lock_manager);
/**
* Client IO
*/
struct InProgressOp : public RefCountedObject {
ceph_tid_t tid;
std::set<pg_shard_t> waiting_for_commit;
Context *on_commit;
OpRequestRef op;
eversion_t v;
bool done() const {
return waiting_for_commit.empty();
}
private:
FRIEND_MAKE_REF(InProgressOp);
InProgressOp(ceph_tid_t tid, Context *on_commit, OpRequestRef op, eversion_t v)
:
tid(tid), on_commit(on_commit),
op(op), v(v) {}
};
std::map<ceph_tid_t, ceph::ref_t<InProgressOp>> in_progress_ops;
public:
friend class C_OSD_OnOpCommit;
void call_write_ordered(std::function<void(void)> &&cb) override {
// ReplicatedBackend submits writes inline in submit_transaction, so
// we can just call the callback.
cb();
}
void submit_transaction(
const hobject_t &hoid,
const object_stat_sum_t &delta_stats,
const eversion_t &at_version,
PGTransactionUPtr &&t,
const eversion_t &trim_to,
const eversion_t &min_last_complete_ondisk,
std::vector<pg_log_entry_t>&& log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
Context *on_all_commit,
ceph_tid_t tid,
osd_reqid_t reqid,
OpRequestRef op
) override;
private:
Message * generate_subop(
const hobject_t &soid,
const eversion_t &at_version,
ceph_tid_t tid,
osd_reqid_t reqid,
eversion_t pg_trim_to,
eversion_t min_last_complete_ondisk,
hobject_t new_temp_oid,
hobject_t discard_temp_oid,
const ceph::buffer::list &log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
ObjectStore::Transaction &op_t,
pg_shard_t peer,
const pg_info_t &pinfo);
void issue_op(
const hobject_t &soid,
const eversion_t &at_version,
ceph_tid_t tid,
osd_reqid_t reqid,
eversion_t pg_trim_to,
eversion_t min_last_complete_ondisk,
hobject_t new_temp_oid,
hobject_t discard_temp_oid,
const std::vector<pg_log_entry_t> &log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
InProgressOp *op,
ObjectStore::Transaction &op_t);
void op_commit(const ceph::ref_t<InProgressOp>& op);
void do_repop_reply(OpRequestRef op);
void do_repop(OpRequestRef op);
struct RepModify {
OpRequestRef op;
bool committed;
int ackerosd;
eversion_t last_complete;
epoch_t epoch_started;
ObjectStore::Transaction opt, localt;
RepModify() : committed(false), ackerosd(-1),
epoch_started(0) {}
};
typedef std::shared_ptr<RepModify> RepModifyRef;
struct C_OSD_RepModifyCommit;
void repop_commit(RepModifyRef rm);
bool auto_repair_supported() const override { return store->has_builtin_csum(); }
int be_deep_scrub(
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
ScrubMap::object &o) override;
uint64_t be_get_ondisk_size(uint64_t logical_size) const final {
return logical_size;
}
};
#endif
| 12,369 | 27.634259 | 83 | h |
null | ceph-main/src/osd/Session.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_SESSION_H
#define CEPH_OSD_SESSION_H
#include "common/RefCountedObj.h"
#include "common/ceph_mutex.h"
#include "global/global_context.h"
#include "include/spinlock.h"
#include "OSDCap.h"
#include "Watch.h"
#include "OSDMap.h"
#include "PeeringState.h"
//#define PG_DEBUG_REFS
class PG;
#ifdef PG_DEBUG_REFS
#include "common/tracked_int_ptr.hpp"
typedef TrackedIntPtr<PG> PGRef;
#else
typedef boost::intrusive_ptr<PG> PGRef;
#endif
/*
* A Backoff represents one instance of either a PG or an OID
* being plugged at the client. It's refcounted and linked from
* the PG {pg_oid}_backoffs map and from the client Session
* object.
*
 * The Backoff has a lock that protects its internal fields.
 *
 * The PG has a backoff_lock that protects its maps to Backoffs.
 * This lock is *inside* of Backoff::lock.
 *
 * The Session has a backoff_lock that protects its map of pg and
* oid backoffs. This lock is *inside* the Backoff::lock *and*
* PG::backoff_lock.
*
* That's
*
* Backoff::lock
* PG::backoff_lock
* Session::backoff_lock
*
* When the Session goes away, we move our backoff lists aside,
* then we lock each of the Backoffs we
* previously referenced and clear the Session* pointer. If the PG
* is still linked, we unlink it, too.
*
* When the PG clears the backoff, it will send an unblock message
* if the Session* is still non-null, and unlink the session.
*
*/
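/* Illustrative sketch (not from the original source) of the nesting above:
 *
 *   std::lock_guard bl(backoff->lock);             // outermost
 *     std::lock_guard pl(pg->backoff_lock);        // may be taken while bl is held
 *       std::lock_guard sl(session->backoff_lock); // innermost
 *
 * e.g. Session::rm_backoff() below asserts that the Backoff's own lock is
 * already held by the caller before taking Session::backoff_lock.
 */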
struct Backoff : public RefCountedObject {
enum {
STATE_NEW = 1, ///< backoff in flight to client
STATE_ACKED = 2, ///< backoff acked
STATE_DELETING = 3 ///< backoff deleted, but un-acked
};
std::atomic<int> state = {STATE_NEW};
spg_t pgid; ///< owning pgid
uint64_t id = 0; ///< unique id (within the Session)
bool is_new() const {
return state.load() == STATE_NEW;
}
bool is_acked() const {
return state.load() == STATE_ACKED;
}
bool is_deleting() const {
return state.load() == STATE_DELETING;
}
const char *get_state_name() const {
switch (state.load()) {
case STATE_NEW: return "new";
case STATE_ACKED: return "acked";
case STATE_DELETING: return "deleting";
default: return "???";
}
}
ceph::mutex lock = ceph::make_mutex("Backoff::lock");
// NOTE: the owning PG and session are either
// - *both* set, or
// - both null (teardown), or
// - only session is set (and state == DELETING)
PGRef pg; ///< owning pg
ceph::ref_t<struct Session> session; ///< owning session
hobject_t begin, end; ///< [) range to block, unless ==, then single obj
  friend std::ostream& operator<<(std::ostream& out, const Backoff& b) {
return out << "Backoff(" << &b << " " << b.pgid << " " << b.id
<< " " << b.get_state_name()
<< " [" << b.begin << "," << b.end << ") "
<< " session " << b.session
<< " pg " << b.pg << ")";
}
private:
FRIEND_MAKE_REF(Backoff);
Backoff(spg_t pgid, PGRef pg, ceph::ref_t<Session> s,
uint64_t i,
const hobject_t& b, const hobject_t& e)
: RefCountedObject(g_ceph_context),
pgid(pgid),
id(i),
pg(pg),
session(std::move(s)),
begin(b),
end(e) {}
};
struct Session : public RefCountedObject {
EntityName entity_name;
OSDCap caps;
ConnectionRef con;
entity_addr_t socket_addr;
WatchConState wstate;
ceph::mutex session_dispatch_lock =
ceph::make_mutex("Session::session_dispatch_lock");
boost::intrusive::list<OpRequest> waiting_on_map;
ceph::spinlock projected_epoch_lock;
epoch_t projected_epoch = 0;
/// protects backoffs; orders inside Backoff::lock *and* PG::backoff_lock
ceph::mutex backoff_lock = ceph::make_mutex("Session::backoff_lock");
  std::atomic<int> backoff_count = {0};  ///< simple count of backoffs
std::map<spg_t, std::map<hobject_t, std::set<ceph::ref_t<Backoff>>>> backoffs;
std::atomic<uint64_t> backoff_seq = {0};
// for heartbeat connections only
int peer = -1;
HeartbeatStampsRef stamps;
entity_addr_t& get_peer_socket_addr() {
return socket_addr;
}
void ack_backoff(
CephContext *cct,
spg_t pgid,
uint64_t id,
const hobject_t& start,
const hobject_t& end);
ceph::ref_t<Backoff> have_backoff(spg_t pgid, const hobject_t& oid) {
if (!backoff_count.load()) {
return nullptr;
}
std::lock_guard l(backoff_lock);
ceph_assert(!backoff_count == backoffs.empty());
auto i = backoffs.find(pgid);
if (i == backoffs.end()) {
return nullptr;
}
auto p = i->second.lower_bound(oid);
if (p != i->second.begin() &&
(p == i->second.end() || p->first > oid)) {
--p;
}
if (p != i->second.end()) {
int r = cmp(oid, p->first);
if (r == 0 || r > 0) {
for (auto& q : p->second) {
if (r == 0 || oid < q->end) {
return &(*q);
}
}
}
}
return nullptr;
}
bool check_backoff(
CephContext *cct, spg_t pgid, const hobject_t& oid, const Message *m);
void add_backoff(ceph::ref_t<Backoff> b) {
std::lock_guard l(backoff_lock);
ceph_assert(!backoff_count == backoffs.empty());
backoffs[b->pgid][b->begin].insert(std::move(b));
++backoff_count;
}
// called by PG::release_*_backoffs and PG::clear_backoffs()
void rm_backoff(const ceph::ref_t<Backoff>& b) {
std::lock_guard l(backoff_lock);
ceph_assert(ceph_mutex_is_locked_by_me(b->lock));
ceph_assert(b->session == this);
auto i = backoffs.find(b->pgid);
if (i != backoffs.end()) {
// may race with clear_backoffs()
auto p = i->second.find(b->begin);
if (p != i->second.end()) {
auto q = p->second.find(b);
if (q != p->second.end()) {
p->second.erase(q);
--backoff_count;
if (p->second.empty()) {
i->second.erase(p);
if (i->second.empty()) {
backoffs.erase(i);
}
}
}
}
}
ceph_assert(!backoff_count == backoffs.empty());
}
void clear_backoffs();
private:
FRIEND_MAKE_REF(Session);
explicit Session(CephContext *cct, Connection *con_) :
RefCountedObject(cct),
con(con_),
socket_addr(con_->get_peer_socket_addr()),
wstate(cct)
{}
};
#endif
| 6,592 | 26.356846 | 80 | h |
null | ceph-main/src/osd/SnapMapReaderI.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file
* \brief Defines the interface for the snap-mapper used by the scrubber.
*/
#include <set>
#include "common/scrub_types.h"
#include "include/expected.hpp"
namespace Scrub {
/*
* snaps-related aux structures:
* the scrub-backend scans the snaps associated with each scrubbed object, and
* fixes corrupted snap-sets.
* The actual access to the PG's snap_mapper, and the actual I/O transactions,
* are performed by the main PgScrubber object.
* the following aux structures are used to facilitate the required exchanges:
 * - pre-fix snap-sets are accessed by the scrub-backend, and
 * - a list of fix-orders (either insert or replace operations) is returned
*/
struct SnapMapReaderI {
struct result_t {
enum class code_t { success, backend_error, not_found, inconsistent };
code_t code{code_t::success};
int backend_error{0}; ///< errno returned by the backend
};
/**
* get SnapMapper's snap-set for a given object
* \returns a set of snaps, or an error code
* \attn: only OBJ_ DB entries are consulted
*/
virtual tl::expected<std::set<snapid_t>, result_t> get_snaps(
const hobject_t& hoid) const = 0;
/**
* get SnapMapper's snap-set for a given object.
* The snaps gleaned from the OBJ_ entry are verified against the
* mapping ('SNA_') entries.
* A mismatch between both sets of entries will result in an error.
* \returns a set of snaps, or an error code.
*/
virtual tl::expected<std::set<snapid_t>, result_t>
get_snaps_check_consistency(const hobject_t& hoid) const = 0;
virtual ~SnapMapReaderI() = default;
};
enum class snap_mapper_op_t {
add,
update,
  overwrite, ///< the mapper's data is internally inconsistent. Similar
             ///< to an 'update' operation, but the logs are different.
};
struct snap_mapper_fix_t {
snap_mapper_op_t op;
hobject_t hoid;
std::set<snapid_t> snaps;
std::set<snapid_t> wrong_snaps; // only collected & returned for logging sake
};
} // namespace Scrub
| 2,119 | 29.724638 | 80 | h |
null | ceph-main/src/osd/SnapMapper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef SNAPMAPPER_H
#define SNAPMAPPER_H
#include <cstring>
#include <set>
#include <string>
#include <utility>
#include "common/hobject.h"
#include "common/map_cacher.hpp"
#ifdef WITH_SEASTAR
# include "crimson/os/futurized_store.h"
# include "crimson/os/futurized_collection.h"
#endif
#include "include/buffer.h"
#include "include/encoding.h"
#include "include/object.h"
#include "os/ObjectStore.h"
#include "osd/OSDMap.h"
#include "osd/SnapMapReaderI.h"
class OSDriver : public MapCacher::StoreDriver<std::string, ceph::buffer::list> {
#ifdef WITH_SEASTAR
using ObjectStoreT = crimson::os::FuturizedStore::Shard;
using CollectionHandleT = ObjectStoreT::CollectionRef;
#else
using ObjectStoreT = ObjectStore;
using CollectionHandleT = ObjectStoreT::CollectionHandle;
#endif
ObjectStoreT *os;
CollectionHandleT ch;
ghobject_t hoid;
public:
class OSTransaction : public MapCacher::Transaction<std::string, ceph::buffer::list> {
friend class OSDriver;
coll_t cid;
ghobject_t hoid;
ceph::os::Transaction *t;
OSTransaction(
const coll_t &cid,
const ghobject_t &hoid,
ceph::os::Transaction *t)
: cid(cid), hoid(hoid), t(t) {}
public:
void set_keys(
const std::map<std::string, ceph::buffer::list> &to_set) override {
t->omap_setkeys(cid, hoid, to_set);
}
void remove_keys(
const std::set<std::string> &to_remove) override {
t->omap_rmkeys(cid, hoid, to_remove);
}
void add_callback(
Context *c) override {
t->register_on_applied(c);
}
};
OSTransaction get_transaction(
ceph::os::Transaction *t) const {
return OSTransaction(ch->get_cid(), hoid, t);
}
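  /* Typical use (illustrative sketch, not from the original source): the
   * SnapMapper's key updates ride along with the PG's own writes in a single
   * ObjectStore transaction:
   *
   *   ceph::os::Transaction t;
   *   auto os_txn = driver.get_transaction(&t);
   *   mapper.update_snaps(oid, new_snaps, &old_snaps, &os_txn);
   *   store->queue_transaction(ch, std::move(t));
   */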
#ifndef WITH_SEASTAR
OSDriver(ObjectStoreT *os, const coll_t& cid, const ghobject_t &hoid) :
OSDriver(os, os->open_collection(cid), hoid) {}
#endif
OSDriver(ObjectStoreT *os, CollectionHandleT ch, const ghobject_t &hoid) :
os(os),
ch(ch),
hoid(hoid) {}
int get_keys(
const std::set<std::string> &keys,
std::map<std::string, ceph::buffer::list> *out) override;
int get_next(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next) override;
int get_next_or_current(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next_or_current) override;
};
/**
* SnapMapper
*
* Manages two mappings:
* 1) hobject_t -> {snapid}
* 2) snapid -> {hobject_t}
*
* We accomplish this using two sets of keys:
* 1) OBJECT_PREFIX + obj.str() -> encoding of object_snaps
* 2) MAPPING_PREFIX + poolid + snapid_t + obj.str() -> encoding of std::pair<snapid_t, obj>
*
* The on disk strings and encodings are implemented in to_raw, to_raw_key,
* from_raw, to_object_key.
*
* The object -> {snapid} mapping is primarily included so that the
* SnapMapper state can be verified against the external PG state during
* scrub etc.
*
* The 2) mapping is arranged such that all objects in a particular
* snap will sort together, and so that all objects in a pg for a
* particular snap will group under up to 8 prefixes.
*/
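/* Illustrative sketch (not from the original source): a clone with snaps
 * {4, 7} in pool 3 is recorded under three keys, roughly
 *
 *   OBJ_<obj>          -> object_snaps{oid, {4, 7}}     (mapping 1)
 *   SNA_<3, 4, ..obj>  -> Mapping{4, oid}               (mapping 2)
 *   SNA_<3, 7, ..obj>  -> Mapping{7, oid}
 *
 * so trimming snap 4 only scans keys under the pool-3/snap-4 prefix, while
 * scrub can cross-check the OBJ_ entry against the SNA_ entries.
 */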
class SnapMapper : public Scrub::SnapMapReaderI {
friend class MapperVerifier; // unit-test support
friend class DirectMapper; // unit-test support
public:
CephContext* cct;
struct object_snaps {
hobject_t oid;
std::set<snapid_t> snaps;
object_snaps(hobject_t oid, const std::set<snapid_t> &snaps)
: oid(oid), snaps(snaps) {}
object_snaps() {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bp);
};
struct Mapping {
snapid_t snap;
hobject_t hoid;
explicit Mapping(const std::pair<snapid_t, hobject_t> &in)
: snap(in.first), hoid(in.second) {}
Mapping() : snap(0) {}
void encode(ceph::buffer::list &bl) const {
ENCODE_START(1, 1, bl);
encode(snap, bl);
encode(hoid, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl) {
DECODE_START(1, bl);
decode(snap, bl);
decode(hoid, bl);
DECODE_FINISH(bl);
}
};
static const std::string LEGACY_MAPPING_PREFIX;
static const std::string MAPPING_PREFIX;
static const std::string OBJECT_PREFIX;
static const char *PURGED_SNAP_EPOCH_PREFIX;
static const char *PURGED_SNAP_PREFIX;
#ifndef WITH_SEASTAR
struct Scrubber {
CephContext *cct;
ObjectStore *store;
ObjectStore::CollectionHandle ch;
ghobject_t mapping_hoid;
ghobject_t purged_snaps_hoid;
ObjectMap::ObjectMapIterator psit;
int64_t pool;
snapid_t begin, end;
bool _parse_p(); ///< advance the purged_snaps pointer
ObjectMap::ObjectMapIterator mapit;
Mapping mapping;
shard_id_t shard;
bool _parse_m(); ///< advance the (object) mapper pointer
std::vector<std::tuple<int64_t, snapid_t, uint32_t, shard_id_t>> stray;
Scrubber(
CephContext *cct,
ObjectStore *store,
ObjectStore::CollectionHandle& ch,
ghobject_t mapping_hoid,
ghobject_t purged_snaps_hoid)
: cct(cct),
store(store),
ch(ch),
mapping_hoid(mapping_hoid),
purged_snaps_hoid(purged_snaps_hoid) {}
void run();
};
static std::string convert_legacy_key(
const std::string& old_key,
const bufferlist& value);
static int convert_legacy(
CephContext *cct,
ObjectStore *store,
ObjectStore::CollectionHandle& ch,
ghobject_t hoid,
unsigned max);
#endif
static void record_purged_snaps(
CephContext *cct,
OSDriver& backend,
OSDriver::OSTransaction&& txn,
std::map<epoch_t,mempool::osdmap::map<int64_t,snap_interval_set_t>> purged_snaps);
private:
static int _lookup_purged_snap(
CephContext *cct,
OSDriver& backend,
int64_t pool, snapid_t snap,
snapid_t *begin, snapid_t *end);
static void make_purged_snap_key_value(
int64_t pool, snapid_t begin,
snapid_t end, std::map<std::string,ceph::buffer::list> *m);
static std::string make_purged_snap_key(int64_t pool, snapid_t last);
  // note: marked 'mutable', as it functions as a cache and is used in some
  // 'const' functions.
mutable MapCacher::MapCacher<std::string, ceph::buffer::list> backend;
static std::string get_legacy_prefix(snapid_t snap);
std::string to_legacy_raw_key(
const std::pair<snapid_t, hobject_t> &to_map);
static bool is_legacy_mapping(const std::string &to_test);
static std::string get_prefix(int64_t pool, snapid_t snap);
std::string to_raw_key(
const std::pair<snapid_t, hobject_t> &to_map) const;
std::string to_raw_key(snapid_t snap, const hobject_t& clone) const;
std::pair<std::string, ceph::buffer::list> to_raw(
const std::pair<snapid_t, hobject_t> &to_map) const;
static bool is_mapping(const std::string &to_test);
static std::pair<snapid_t, hobject_t> from_raw(
const std::pair<std::string, ceph::buffer::list> &image);
static std::pair<snapid_t, hobject_t> from_raw(
const ceph::buffer::list& image);
std::string to_object_key(const hobject_t &hoid) const;
int get_snaps(const hobject_t &oid, object_snaps *out) const;
std::set<std::string> to_raw_keys(
const hobject_t &clone,
const std::set<snapid_t> &snaps) const;
void set_snaps(
const hobject_t &oid,
const object_snaps &out,
MapCacher::Transaction<std::string, ceph::buffer::list> *t);
void clear_snaps(
const hobject_t &oid,
MapCacher::Transaction<std::string, ceph::buffer::list> *t);
// True if hoid belongs in this mapping based on mask_bits and match
bool check(const hobject_t &hoid) const;
int _remove_oid(
const hobject_t &oid, ///< [in] oid to remove
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
);
/// Get snaps (as an 'object_snaps' object) for oid
tl::expected<object_snaps, SnapMapReaderI::result_t> get_snaps_common(
const hobject_t &hoid) const;
public:
static std::string make_shard_prefix(shard_id_t shard) {
if (shard == shard_id_t::NO_SHARD)
return std::string();
char buf[20];
int r = snprintf(buf, sizeof(buf), ".%x", (int)shard);
ceph_assert(r < (int)sizeof(buf));
return std::string(buf, r) + '_';
}
uint32_t mask_bits;
const uint32_t match;
std::string last_key_checked;
const int64_t pool;
const shard_id_t shard;
const std::string shard_prefix;
SnapMapper(
CephContext* cct,
MapCacher::StoreDriver<std::string, ceph::buffer::list> *driver,
uint32_t match, ///< [in] pgid
uint32_t bits, ///< [in] current split bits
int64_t pool, ///< [in] pool
shard_id_t shard ///< [in] shard
)
: cct(cct), backend(driver), mask_bits(bits), match(match), pool(pool),
shard(shard), shard_prefix(make_shard_prefix(shard)) {
update_bits(mask_bits);
}
std::set<std::string> prefixes;
/// Update bits in case of pg split or merge
void update_bits(
uint32_t new_bits ///< [in] new split bits
) {
mask_bits = new_bits;
std::set<std::string> _prefixes = hobject_t::get_prefixes(
mask_bits,
match,
pool);
prefixes.clear();
for (auto i = _prefixes.begin(); i != _prefixes.end(); ++i) {
prefixes.insert(shard_prefix + *i);
}
}
/// Update snaps for oid, empty new_snaps removes the mapping
int update_snaps(
const hobject_t &oid, ///< [in] oid to update
    const std::set<snapid_t> &new_snaps, ///< [in] new snap set
const std::set<snapid_t> *old_snaps, ///< [in] old snaps (for debugging)
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
    ); ///< @return error, 0 on success
/// Add mapping for oid, must not already be mapped
void add_oid(
const hobject_t &oid, ///< [in] oid to add
const std::set<snapid_t>& new_snaps, ///< [in] snaps
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
);
  /// Returns the next objects (up to max) that have snap among their snaps
int get_next_objects_to_trim(
snapid_t snap, ///< [in] snap to check
unsigned max, ///< [in] max to get
std::vector<hobject_t> *out ///< [out] next objects to trim (must be empty)
); ///< @return error, -ENOENT if no more objects
/// Remove mapping for oid
int remove_oid(
const hobject_t &oid, ///< [in] oid to remove
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
); ///< @return error, -ENOENT if the object is not mapped
/// Get snaps for oid
int get_snaps(
const hobject_t &oid, ///< [in] oid to get snaps for
std::set<snapid_t> *snaps ///< [out] snaps
) const; ///< @return error, -ENOENT if oid is not recorded
/// Get snaps for oid - alternative interface
tl::expected<std::set<snapid_t>, SnapMapReaderI::result_t> get_snaps(
const hobject_t &hoid) const final;
/**
* get_snaps_check_consistency
*
* Returns snaps for hoid as in get_snaps(), but additionally validates the
* snap->hobject_t mappings ('SNA_' entries).
*/
tl::expected<std::set<snapid_t>, SnapMapReaderI::result_t>
get_snaps_check_consistency(const hobject_t &hoid) const final;
};
WRITE_CLASS_ENCODER(SnapMapper::object_snaps)
WRITE_CLASS_ENCODER(SnapMapper::Mapping)
#endif
| 11,687 | 29.83905 | 93 | h |
null | ceph-main/src/osd/TierAgentState.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_TIERAGENT_H
#define CEPH_OSD_TIERAGENT_H
#include <ctime>
#include <list>
#include <map>
#include <utility>
#include "common/Formatter.h"
#include "common/histogram.h"
#include "common/hobject.h"
#include "osd/HitSet.h"
struct TierAgentState {
/// current position iterating across pool
hobject_t position;
/// Count of agent_work since "start" position of object hash space
int started;
hobject_t start;
bool delaying;
/// histogram of ages we've encountered
pow2_hist_t temp_hist;
int hist_age;
/// past HitSet(s) (not current)
std::map<time_t,HitSetRef> hit_set_map;
/// a few recent things we've seen that are clean
std::list<hobject_t> recent_clean;
enum flush_mode_t {
FLUSH_MODE_IDLE, // nothing to flush
    FLUSH_MODE_LOW,   // flush dirty objects at a low rate
    FLUSH_MODE_HIGH,  // flush dirty objects at a high rate
} flush_mode; ///< current flush behavior
static const char *get_flush_mode_name(flush_mode_t m) {
switch (m) {
case FLUSH_MODE_IDLE: return "idle";
case FLUSH_MODE_LOW: return "low";
case FLUSH_MODE_HIGH: return "high";
default: ceph_abort_msg("bad flush mode");
}
}
const char *get_flush_mode_name() const {
return get_flush_mode_name(flush_mode);
}
enum evict_mode_t {
EVICT_MODE_IDLE, // no need to evict anything
EVICT_MODE_SOME, // evict some things as we are near the target
EVICT_MODE_FULL, // evict anything
} evict_mode; ///< current evict behavior
static const char *get_evict_mode_name(evict_mode_t m) {
switch (m) {
case EVICT_MODE_IDLE: return "idle";
case EVICT_MODE_SOME: return "some";
case EVICT_MODE_FULL: return "full";
default: ceph_abort_msg("bad evict mode");
}
}
const char *get_evict_mode_name() const {
return get_evict_mode_name(evict_mode);
}
/// approximate ratio of objects (assuming they are uniformly
  /// distributed) that we should aim to evict.
unsigned evict_effort;
TierAgentState()
: started(0),
delaying(false),
hist_age(0),
flush_mode(FLUSH_MODE_IDLE),
evict_mode(EVICT_MODE_IDLE),
evict_effort(0)
{}
  /// true only if we have no work to do
bool is_idle() const {
return
delaying ||
(flush_mode == FLUSH_MODE_IDLE &&
evict_mode == EVICT_MODE_IDLE);
}
/// add archived HitSet
void add_hit_set(time_t start, HitSetRef hs) {
hit_set_map.insert(std::make_pair(start, hs));
}
/// remove old/trimmed HitSet
void remove_oldest_hit_set() {
if (!hit_set_map.empty())
hit_set_map.erase(hit_set_map.begin());
}
/// discard all open hit sets
void discard_hit_sets() {
hit_set_map.clear();
}
void dump(ceph::Formatter *f) const {
f->dump_string("flush_mode", get_flush_mode_name());
f->dump_string("evict_mode", get_evict_mode_name());
f->dump_unsigned("evict_effort", evict_effort);
f->dump_stream("position") << position;
f->open_object_section("temp_hist");
temp_hist.dump(f);
f->close_section();
}
};
#endif
| 3,480 | 25.984496 | 72 | h |
null | ceph-main/src/osd/Watch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_WATCH_H
#define CEPH_WATCH_H
#include <set>
#include "msg/Connection.h"
#include "include/Context.h"
enum WatcherState {
WATCHER_PENDING,
WATCHER_NOTIFIED,
};
class OSDService;
class PrimaryLogPG;
void intrusive_ptr_add_ref(PrimaryLogPG *pg);
void intrusive_ptr_release(PrimaryLogPG *pg);
struct ObjectContext;
class MWatchNotify;
class Watch;
typedef std::shared_ptr<Watch> WatchRef;
typedef std::weak_ptr<Watch> WWatchRef;
class Notify;
typedef std::shared_ptr<Notify> NotifyRef;
typedef std::weak_ptr<Notify> WNotifyRef;
struct CancelableContext;
/**
* Notify tracks the progress of a particular notify
*
* References are held by Watch and the timeout callback.
*/
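/* Rough lifecycle sketch (not from the original source):
 *
 *   NotifyRef n = Notify::makeNotifyRef(con, gid, bl, timeout, cookie,
 *                                       notify_id, version, osd);
 *   n->start_watcher(w);            // once per watcher, before init()
 *   n->init();                      // arms the timeout, sends to watchers
 *   ...
 *   n->complete_watcher(w, reply);  // per NotifyAck received
 *   // the completion reply goes back to the client once every watcher has
 *   // answered (or been removed), or when the timeout fires first.
 */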
class Notify {
friend class NotifyTimeoutCB;
friend class Watch;
WNotifyRef self;
ConnectionRef client;
uint64_t client_gid;
bool complete;
bool discarded;
bool timed_out; ///< true if the notify timed out
std::set<WatchRef> watchers;
ceph::buffer::list payload;
uint32_t timeout;
uint64_t cookie;
uint64_t notify_id;
uint64_t version;
OSDService *osd;
CancelableContext *cb;
ceph::mutex lock = ceph::make_mutex("Notify::lock");
/// (gid,cookie) -> reply_bl for everyone who acked the notify
std::multimap<std::pair<uint64_t,uint64_t>, ceph::buffer::list> notify_replies;
/// true if this notify is being discarded
bool is_discarded() {
return discarded || complete;
}
/// Sends notify completion if watchers.empty() or timeout
void maybe_complete_notify();
/// Called on Notify timeout
void do_timeout();
Notify(
ConnectionRef client,
uint64_t client_gid,
ceph::buffer::list& payload,
uint32_t timeout,
uint64_t cookie,
uint64_t notify_id,
uint64_t version,
OSDService *osd);
/// registers a timeout callback with the watch_timer
void register_cb();
/// removes the timeout callback, called on completion or cancellation
void unregister_cb();
public:
std::ostream& gen_dbg_prefix(std::ostream& out) {
return out << "Notify(" << std::make_pair(cookie, notify_id) << " "
<< " watchers=" << watchers.size()
<< ") ";
}
void set_self(NotifyRef _self) {
self = _self;
}
static NotifyRef makeNotifyRef(
ConnectionRef client,
uint64_t client_gid,
ceph::buffer::list &payload,
uint32_t timeout,
uint64_t cookie,
uint64_t notify_id,
uint64_t version,
OSDService *osd);
/// Call after creation to initialize
void init();
/// Called once per watcher prior to init()
void start_watcher(
WatchRef watcher ///< [in] watcher to complete
);
/// Called once per NotifyAck
void complete_watcher(
WatchRef watcher, ///< [in] watcher to complete
ceph::buffer::list& reply_bl ///< [in] reply buffer from the notified watcher
);
/// Called when a watcher unregisters or times out
void complete_watcher_remove(
WatchRef watcher ///< [in] watcher to complete
);
/// Called when the notify is canceled due to a new peering interval
void discard();
};
/**
* Watch is a mapping between a Connection and an ObjectContext
*
* References are held by ObjectContext and the timeout callback
*/
class HandleWatchTimeout;
class HandleDelayedWatchTimeout;
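/* Rough lifecycle sketch (not from the original source): while the client is
 * connected the watch simply relays notifies and pings; disconnect() arms a
 * timeout via register_cb(), after which either connect() cancels it and
 * resends any pending notifies, or HandleWatchTimeout fires and the watch is
 * torn down.
 */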
class Watch {
WWatchRef self;
friend class HandleWatchTimeout;
friend class HandleDelayedWatchTimeout;
ConnectionRef conn;
CancelableContext *cb;
OSDService *osd;
boost::intrusive_ptr<PrimaryLogPG> pg;
std::shared_ptr<ObjectContext> obc;
std::map<uint64_t, NotifyRef> in_progress_notifies;
// Could have watch_info_t here, but this file includes osd_types.h
uint32_t timeout; ///< timeout in seconds
uint64_t cookie;
entity_addr_t addr;
bool will_ping; ///< is client new enough to ping the watch
utime_t last_ping; ///< last client ping
entity_name_t entity;
bool discarded;
Watch(
PrimaryLogPG *pg, OSDService *osd,
std::shared_ptr<ObjectContext> obc, uint32_t timeout,
uint64_t cookie, entity_name_t entity,
const entity_addr_t& addr);
/// Registers the timeout callback with watch_timer
void register_cb();
  /// send the notify message for notif (called while connected)
void send_notify(NotifyRef notif);
/// Cleans up state on discard or remove (including Connection state, obc)
void discard_state();
public:
/// Unregisters the timeout callback
void unregister_cb();
/// note receipt of a ping
void got_ping(utime_t t);
/// True if currently connected
bool is_connected() const {
return conn.get() != NULL;
}
bool is_connected(Connection *con) const {
return conn.get() == con;
}
/// NOTE: must be called with pg lock held
~Watch();
uint64_t get_watcher_gid() const {
return entity.num();
}
std::ostream& gen_dbg_prefix(std::ostream& out);
static WatchRef makeWatchRef(
PrimaryLogPG *pg, OSDService *osd,
std::shared_ptr<ObjectContext> obc, uint32_t timeout, uint64_t cookie, entity_name_t entity, const entity_addr_t &addr);
void set_self(WatchRef _self) {
self = _self;
}
/// Does not grant a ref count!
boost::intrusive_ptr<PrimaryLogPG> get_pg() { return pg; }
std::shared_ptr<ObjectContext> get_obc() { return obc; }
uint64_t get_cookie() const { return cookie; }
entity_name_t get_entity() const { return entity; }
entity_addr_t get_peer_addr() const { return addr; }
uint32_t get_timeout() const { return timeout; }
/// Generates context for use if watch timeout is delayed by scrub or recovery
Context *get_delayed_cb();
/// Transitions Watch to connected, unregister_cb, resends pending Notifies
void connect(
ConnectionRef con, ///< [in] Reference to new connection
bool will_ping ///< [in] client is new and will send pings
);
/// Transitions watch to disconnected, register_cb
void disconnect();
/// Called if Watch state is discarded due to new peering interval
void discard();
/// True if removed or discarded
bool is_discarded() const;
/// Called on unwatch
void remove(bool send_disconnect);
/// Adds notif as in-progress notify
void start_notify(
NotifyRef notif ///< [in] Reference to new in-progress notify
);
/// Removes timed out notify
void cancel_notify(
NotifyRef notif ///< [in] notify which timed out
);
/// Call when notify_ack received on notify_id
void notify_ack(
uint64_t notify_id, ///< [in] id of acked notify
ceph::buffer::list& reply_bl ///< [in] notify reply buffer
);
};
/**
* Holds weak refs to Watch structures corresponding to a connection
* Lives in the Session object of an OSD connection
*/
class WatchConState {
ceph::mutex lock = ceph::make_mutex("WatchConState");
std::set<WatchRef> watches;
public:
CephContext* cct;
explicit WatchConState(CephContext* cct) : cct(cct) {}
/// Add a watch
void addWatch(
WatchRef watch ///< [in] Ref to new watch object
);
/// Remove a watch
void removeWatch(
WatchRef watch ///< [in] Ref to watch object to remove
);
/// Called on session reset, disconnects watchers
void reset(Connection *con);
};
#endif
| 7,546 | 25.114187 | 124 | h |
null | ceph-main/src/osd/error_code.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/system/error_code.hpp>
#include "include/rados.h"
const boost::system::error_category& osd_category() noexcept;
// Since the OSD mostly uses POSIX error codes plus a couple
// additions, this will be a degenerate error category for now that
// mostly forwards to POSIX.
enum class osd_errc {
  old_snapc = 85, /* ORDERSNAP flag set; writer has old snapc */
blocklisted = 108 /* blocklisted */
};
namespace boost::system {
template<>
struct is_error_code_enum<::osd_errc> {
static const bool value = true;
};
template<>
struct is_error_condition_enum<::osd_errc> {
static const bool value = false;
};
}
// implicit conversion:
inline boost::system::error_code make_error_code(osd_errc e) noexcept {
return { static_cast<int>(e), osd_category() };
}
// explicit conversion:
inline boost::system::error_condition make_error_condition(osd_errc e) noexcept {
return { static_cast<int>(e), osd_category() };
}
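/* Minimal usage sketch (not from the original source):
 *
 *   boost::system::error_code ec = osd_errc::blocklisted;  // implicit
 *   assert(ec.category() == osd_category());
 *   assert(ec.value() == 108);
 */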
| 1,422 | 25.351852 | 81 | h |
null | ceph-main/src/osd/object_state.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "osd_types.h"
struct ObjectState {
object_info_t oi;
bool exists; ///< the stored object exists (i.e., we will remember the object_info_t)
ObjectState() : exists(false) {}
ObjectState(const object_info_t &oi_, bool exists_)
: oi(oi_), exists(exists_) {}
ObjectState(object_info_t &&oi_, bool exists_)
: oi(std::move(oi_)), exists(exists_) {}
ObjectState(const hobject_t &obj) : oi(obj), exists(false) {}
};
struct RWState {
enum State {
RWNONE,
RWREAD,
RWWRITE,
RWEXCL,
};
static const char *get_state_name(State s) {
switch (s) {
case RWNONE: return "none";
case RWREAD: return "read";
case RWWRITE: return "write";
case RWEXCL: return "excl";
default: return "???";
}
}
const char *get_state_name() const {
return get_state_name(state);
}
int count; ///< number of readers or writers
int waiters = 0; ///< number waiting
State state:4; ///< rw state
/// if set, restart backfill when we can get a read lock
bool recovery_read_marker:1;
/// if set, requeue snaptrim on lock release
bool snaptrimmer_write_marker:1;
RWState()
: count(0),
state(RWNONE),
recovery_read_marker(false),
snaptrimmer_write_marker(false)
{}
/// this function adjusts the counts if necessary
bool get_read_lock() {
// don't starve anybody!
if (waiters > 0) {
return false;
}
switch (state) {
case RWNONE:
ceph_assert(count == 0);
state = RWREAD;
// fall through
case RWREAD:
count++;
return true;
case RWWRITE:
return false;
case RWEXCL:
return false;
default:
ceph_abort_msg("unhandled case");
return false;
}
}
bool get_write_lock(bool greedy=false) {
if (!greedy) {
// don't starve anybody!
if (waiters > 0 ||
recovery_read_marker) {
return false;
}
}
switch (state) {
case RWNONE:
ceph_assert(count == 0);
state = RWWRITE;
// fall through
case RWWRITE:
count++;
return true;
case RWREAD:
return false;
case RWEXCL:
return false;
default:
ceph_abort_msg("unhandled case");
return false;
}
}
bool get_excl_lock() {
switch (state) {
case RWNONE:
ceph_assert(count == 0);
state = RWEXCL;
count = 1;
return true;
case RWWRITE:
return false;
case RWREAD:
return false;
case RWEXCL:
return false;
default:
ceph_abort_msg("unhandled case");
return false;
}
}
/// same as get_write_lock, but ignore starvation
bool take_write_lock() {
if (state == RWWRITE) {
count++;
return true;
}
return get_write_lock();
}
bool dec() {
ceph_assert(count > 0);
count--;
if (count == 0) {
state = RWNONE;
return true;
} else {
return false;
}
}
bool put_read() {
ceph_assert(state == RWREAD);
return dec();
}
bool put_write() {
ceph_assert(state == RWWRITE);
return dec();
}
bool put_excl() {
ceph_assert(state == RWEXCL);
return dec();
}
void inc_waiters() {
++waiters;
}
void release_waiters() {
waiters = 0;
}
void dec_waiters(int count) {
ceph_assert(waiters >= count);
waiters -= count;
}
bool empty() const { return state == RWNONE; }
bool get_snaptrimmer_write(bool mark_if_unsuccessful) {
if (get_write_lock()) {
return true;
} else {
if (mark_if_unsuccessful)
snaptrimmer_write_marker = true;
return false;
}
}
bool get_recovery_read() {
recovery_read_marker = true;
if (get_read_lock()) {
return true;
}
return false;
}
};
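/* Illustrative sketch (not from the original source) of the counting
 * semantics, starting from a fresh RWState rw:
 *
 *   rw.get_read_lock();   // RWNONE -> RWREAD, count == 1
 *   rw.get_read_lock();   // shared read, count == 2
 *   rw.get_write_lock();  // false: readers hold the lock
 *   rw.inc_waiters();     // later read attempts now fail too, so the
 *                         // queued writer is not starved
 *   rw.put_read();        // count == 1
 *   rw.put_read();        // count == 0 -> RWNONE, returns true (wake waiters)
 */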
inline std::ostream& operator<<(std::ostream& out, const RWState& rw)
{
return out << "rwstate(" << rw.get_state_name()
<< " n=" << rw.count
<< " w=" << rw.waiters
<< ")";
}
| 4,084 | 20.387435 | 95 | h |
null | ceph-main/src/osd/object_state_fmt.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file fmtlib formatters for some types.h classes
*/
#include "osd/object_state.h"
#include "osd/osd_types_fmt.h"
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
template <>
struct fmt::formatter<ObjectState> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const ObjectState& os, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "exists {} oi {}", os.exists, os.oi);
}
};
| 600 | 24.041667 | 74 | h |
null | ceph-main/src/osd/osd_internal_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OSD_INTERNAL_TYPES_H
#define CEPH_OSD_INTERNAL_TYPES_H
#include "osd_types.h"
#include "OpRequest.h"
#include "object_state.h"
/*
* keep tabs on object modifications that are in flight.
* we need to know the projected existence, size, snapset,
* etc., because we don't send writes down to disk until after
* replicas ack.
*/
struct SnapSetContext {
hobject_t oid;
SnapSet snapset;
int ref;
bool registered : 1;
bool exists : 1;
explicit SnapSetContext(const hobject_t& o) :
oid(o), ref(0), registered(false), exists(true) { }
};
inline std::ostream& operator<<(std::ostream& out, const SnapSetContext& ssc)
{
return out << "ssc(" << ssc.oid << " snapset: " << ssc.snapset
<< " ref: " << ssc.ref << " registered: "
<< ssc.registered << " exists: " << ssc.exists << ")";
}
struct ObjectContext;
typedef std::shared_ptr<ObjectContext> ObjectContextRef;
struct ObjectContext {
ObjectState obs;
SnapSetContext *ssc; // may be null
Context *destructor_callback;
public:
// any entity in obs.oi.watchers MUST be in either watchers or unconnected_watchers.
std::map<std::pair<uint64_t, entity_name_t>, WatchRef> watchers;
// attr cache
std::map<std::string, ceph::buffer::list, std::less<>> attr_cache;
RWState rwstate;
std::list<OpRequestRef> waiters; ///< ops waiting on state change
bool get_read(OpRequestRef& op) {
if (rwstate.get_read_lock()) {
return true;
} // else
// Now we really need to bump up the ref-counter.
waiters.emplace_back(op);
rwstate.inc_waiters();
return false;
}
bool get_write(OpRequestRef& op, bool greedy=false) {
if (rwstate.get_write_lock(greedy)) {
return true;
} // else
if (op) {
waiters.emplace_back(op);
rwstate.inc_waiters();
}
return false;
}
bool get_excl(OpRequestRef& op) {
if (rwstate.get_excl_lock()) {
return true;
} // else
if (op) {
waiters.emplace_back(op);
rwstate.inc_waiters();
}
return false;
}
void wake(std::list<OpRequestRef> *requeue) {
rwstate.release_waiters();
requeue->splice(requeue->end(), waiters);
}
void put_read(std::list<OpRequestRef> *requeue) {
if (rwstate.put_read()) {
wake(requeue);
}
}
void put_write(std::list<OpRequestRef> *requeue) {
if (rwstate.put_write()) {
wake(requeue);
}
}
void put_excl(std::list<OpRequestRef> *requeue) {
if (rwstate.put_excl()) {
wake(requeue);
}
}
bool empty() const { return rwstate.empty(); }
bool get_lock_type(OpRequestRef& op, RWState::State type) {
switch (type) {
case RWState::RWWRITE:
return get_write(op);
case RWState::RWREAD:
return get_read(op);
case RWState::RWEXCL:
return get_excl(op);
default:
ceph_abort_msg("invalid lock type");
return true;
}
}
bool get_write_greedy(OpRequestRef& op) {
return get_write(op, true);
}
bool get_snaptrimmer_write(bool mark_if_unsuccessful) {
return rwstate.get_snaptrimmer_write(mark_if_unsuccessful);
}
bool get_recovery_read() {
return rwstate.get_recovery_read();
}
bool try_get_read_lock() {
return rwstate.get_read_lock();
}
void drop_recovery_read(std::list<OpRequestRef> *ls) {
ceph_assert(rwstate.recovery_read_marker);
put_read(ls);
rwstate.recovery_read_marker = false;
}
void put_lock_type(
RWState::State type,
std::list<OpRequestRef> *to_wake,
bool *requeue_recovery,
bool *requeue_snaptrimmer) {
switch (type) {
case RWState::RWWRITE:
put_write(to_wake);
break;
case RWState::RWREAD:
put_read(to_wake);
break;
case RWState::RWEXCL:
put_excl(to_wake);
break;
default:
ceph_abort_msg("invalid lock type");
}
if (rwstate.empty() && rwstate.recovery_read_marker) {
rwstate.recovery_read_marker = false;
*requeue_recovery = true;
}
if (rwstate.empty() && rwstate.snaptrimmer_write_marker) {
rwstate.snaptrimmer_write_marker = false;
*requeue_snaptrimmer = true;
}
}
bool is_request_pending() {
return !rwstate.empty();
}
ObjectContext()
: ssc(NULL),
destructor_callback(0),
blocked(false), requeue_scrub_on_unblock(false) {}
~ObjectContext() {
ceph_assert(rwstate.empty());
if (destructor_callback)
destructor_callback->complete(0);
}
void start_block() {
ceph_assert(!blocked);
blocked = true;
}
void stop_block() {
ceph_assert(blocked);
blocked = false;
}
bool is_blocked() const {
return blocked;
}
/// in-progress copyfrom ops for this object
bool blocked;
bool requeue_scrub_on_unblock; // true if we need to requeue scrub on unblock
};
inline std::ostream& operator<<(std::ostream& out, const ObjectState& obs)
{
out << obs.oi.soid;
if (!obs.exists)
out << "(dne)";
return out;
}
inline std::ostream& operator<<(std::ostream& out, const ObjectContext& obc)
{
return out << "obc(" << obc.obs << " " << obc.rwstate << ")";
}
class ObcLockManager {
struct ObjectLockState {
ObjectContextRef obc;
RWState::State type;
ObjectLockState(
ObjectContextRef obc,
RWState::State type)
: obc(std::move(obc)), type(type) {}
};
std::map<hobject_t, ObjectLockState> locks;
public:
ObcLockManager() = default;
ObcLockManager(ObcLockManager &&) = default;
ObcLockManager(const ObcLockManager &) = delete;
ObcLockManager &operator=(ObcLockManager &&) = default;
bool empty() const {
return locks.empty();
}
bool get_lock_type(
RWState::State type,
const hobject_t &hoid,
ObjectContextRef& obc,
OpRequestRef& op) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_lock_type(op, type)) {
locks.insert(std::make_pair(hoid, ObjectLockState(obc, type)));
return true;
} else {
return false;
}
}
/// Get write lock, ignore starvation
bool take_write_lock(
const hobject_t &hoid,
ObjectContextRef obc) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->rwstate.take_write_lock()) {
locks.insert(
std::make_pair(
hoid, ObjectLockState(obc, RWState::RWWRITE)));
return true;
} else {
return false;
}
}
/// Get write lock for snap trim
bool get_snaptrimmer_write(
const hobject_t &hoid,
ObjectContextRef obc,
bool mark_if_unsuccessful) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_snaptrimmer_write(mark_if_unsuccessful)) {
locks.insert(
std::make_pair(
hoid, ObjectLockState(obc, RWState::RWWRITE)));
return true;
} else {
return false;
}
}
/// Get write lock greedy
bool get_write_greedy(
const hobject_t &hoid,
ObjectContextRef obc,
OpRequestRef op) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_write_greedy(op)) {
locks.insert(
std::make_pair(
hoid, ObjectLockState(obc, RWState::RWWRITE)));
return true;
} else {
return false;
}
}
/// try get read lock
bool try_get_read_lock(
const hobject_t &hoid,
ObjectContextRef obc) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->try_get_read_lock()) {
locks.insert(
std::make_pair(
hoid,
ObjectLockState(obc, RWState::RWREAD)));
return true;
} else {
return false;
}
}
void put_locks(
std::list<std::pair<ObjectContextRef, std::list<OpRequestRef> > > *to_requeue,
bool *requeue_recovery,
bool *requeue_snaptrimmer) {
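    // (clarifying note, not in the original) this releases every lock taken
    // through this manager; a manager still holding locks must have
    // put_locks() called on it before destruction, since ~ObcLockManager()
    // asserts that 'locks' is empty.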
for (auto& p: locks) {
std::list<OpRequestRef> _to_requeue;
p.second.obc->put_lock_type(
p.second.type,
&_to_requeue,
requeue_recovery,
requeue_snaptrimmer);
if (to_requeue) {
// We can safely std::move here as the whole `locks` is going
// to die just after the loop.
to_requeue->emplace_back(std::move(p.second.obc),
std::move(_to_requeue));
}
}
locks.clear();
}
~ObcLockManager() {
ceph_assert(locks.empty());
}
};
#endif
| 8,217 | 24.054878 | 86 | h |
null | ceph-main/src/osd/osd_op_util.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <vector>
#include <string>
#include "osd/OSDMap.h"
#include "messages/MOSDOp.h"
class OpInfo {
public:
struct ClassInfo {
ClassInfo(std::string&& class_name, std::string&& method_name,
bool read, bool write, bool allowed) :
class_name(std::move(class_name)), method_name(std::move(method_name)),
read(read), write(write), allowed(allowed)
{}
const std::string class_name;
const std::string method_name;
const bool read, write, allowed;
};
private:
uint64_t rmw_flags = 0;
std::vector<ClassInfo> classes;
void set_rmw_flags(int flags);
void add_class(std::string&& class_name, std::string&& method_name,
bool read, bool write, bool allowed) {
classes.emplace_back(std::move(class_name), std::move(method_name),
read, write, allowed);
}
public:
void clear() {
rmw_flags = 0;
}
uint64_t get_flags() const {
return rmw_flags;
}
  bool check_rmw(int flag) const;
bool may_read() const;
bool may_read_data() const;
bool may_write() const;
bool may_cache() const;
bool rwordered_forced() const;
bool rwordered() const;
bool includes_pg_op() const;
bool need_read_cap() const;
bool need_write_cap() const;
bool need_promote() const;
bool need_skip_handle_cache() const;
bool need_skip_promote() const;
bool allows_returnvec() const;
void set_read();
void set_write();
void set_cache();
void set_class_read();
void set_class_write();
void set_pg_op();
void set_promote();
void set_skip_handle_cache();
void set_skip_promote();
void set_force_rwordered();
void set_returnvec();
void set_read_data();
int set_from_op(
const MOSDOp *m,
const OSDMap &osdmap);
int set_from_op(
const std::vector<OSDOp> &ops,
const pg_t &pg,
const OSDMap &osdmap);
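  // Usage sketch (illustrative; the real call sites live in the OSD): an op
  // handler would typically do
  //   OpInfo info;
  //   if (int r = info.set_from_op(m, osdmap); r == 0) {  // m: incoming MOSDOp
  //     if (info.may_write() || info.may_cache()) { /* take the write path */ }
  //   }
  // before deciding how to queue and execute the request.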
std::vector<ClassInfo> get_classes() const {
return classes;
}
};
std::ostream& operator<<(std::ostream& out, const OpInfo::ClassInfo& i);
| 2,115 | 22.511111 | 77 | h |
null | ceph-main/src/osd/osd_types_fmt.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file fmtlib formatters for some types.h classes
*/
#include "common/hobject_fmt.h"
#include "osd/osd_types.h"
#include <fmt/chrono.h>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
template <>
struct fmt::formatter<osd_reqid_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const osd_reqid_t& req_id, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "{}.{}:{}", req_id.name, req_id.inc,
req_id.tid);
}
};
template <>
struct fmt::formatter<pg_shard_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_shard_t& shrd, FormatContext& ctx) const
{
if (shrd.is_undefined()) {
return fmt::format_to(ctx.out(), "?");
}
if (shrd.shard == shard_id_t::NO_SHARD) {
return fmt::format_to(ctx.out(), "{}", shrd.get_osd());
}
return fmt::format_to(ctx.out(), "{}({})", shrd.get_osd(), shrd.shard);
}
};
template <>
struct fmt::formatter<eversion_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const eversion_t& ev, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "{}'{}", ev.epoch, ev.version);
}
};
template <>
struct fmt::formatter<chunk_info_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const chunk_info_t& ci, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "(len: {} oid: {} offset: {} flags: {})",
ci.length, ci.oid, ci.offset,
ci.get_flag_string(ci.flags));
}
};
template <>
struct fmt::formatter<object_manifest_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const object_manifest_t& om, FormatContext& ctx) const
{
fmt::format_to(ctx.out(), "manifest({}", om.get_type_name());
if (om.is_redirect()) {
fmt::format_to(ctx.out(), " {}", om.redirect_target);
} else if (om.is_chunked()) {
fmt::format_to(ctx.out(), " {}", om.chunk_map);
}
return fmt::format_to(ctx.out(), ")");
}
};
template <>
struct fmt::formatter<object_info_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const object_info_t& oi, FormatContext& ctx) const
{
fmt::format_to(ctx.out(), "{}({} {} {} s {} uv {}", oi.soid, oi.version,
oi.last_reqid, (oi.flags ? oi.get_flag_string() : ""), oi.size,
oi.user_version);
if (oi.is_data_digest()) {
fmt::format_to(ctx.out(), " dd {:x}", oi.data_digest);
}
if (oi.is_omap_digest()) {
fmt::format_to(ctx.out(), " od {:x}", oi.omap_digest);
}
fmt::format_to(ctx.out(), " alloc_hint [{} {} {}]", oi.expected_object_size,
oi.expected_write_size, oi.alloc_hint_flags);
if (oi.has_manifest()) {
fmt::format_to(ctx.out(), " {}", oi.manifest);
}
return fmt::format_to(ctx.out(), ")");
}
};
template <>
struct fmt::formatter<pg_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_t& pg, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "{}.{:x}", pg.pool(), pg.m_seed);
}
};
template <>
struct fmt::formatter<spg_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const spg_t& spg, FormatContext& ctx) const
{
if (shard_id_t::NO_SHARD == spg.shard.id) {
return fmt::format_to(ctx.out(), "{}", spg.pgid);
} else {
return fmt::format_to(ctx.out(), "{}s{}>", spg.pgid, spg.shard.id);
}
}
};
template <>
struct fmt::formatter<pg_history_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_history_t& pgh, FormatContext& ctx) const
{
fmt::format_to(ctx.out(),
"ec={}/{} lis/c={}/{} les/c/f={}/{}/{} sis={}",
pgh.epoch_created,
pgh.epoch_pool_created,
pgh.last_interval_started,
pgh.last_interval_clean,
pgh.last_epoch_started,
pgh.last_epoch_clean,
pgh.last_epoch_marked_full,
pgh.same_interval_since);
if (pgh.prior_readable_until_ub != ceph::timespan::zero()) {
return fmt::format_to(ctx.out(),
" pruub={}",
pgh.prior_readable_until_ub);
} else {
return ctx.out();
}
}
};
template <>
struct fmt::formatter<pg_info_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_info_t& pgi, FormatContext& ctx) const
{
fmt::format_to(ctx.out(), "{}({}", pgi.pgid, (pgi.dne() ? " DNE" : ""));
if (pgi.is_empty()) {
fmt::format_to(ctx.out(), " empty");
} else {
fmt::format_to(ctx.out(), " v {}", pgi.last_update);
if (pgi.last_complete != pgi.last_update) {
fmt::format_to(ctx.out(), " lc {}", pgi.last_complete);
}
fmt::format_to(ctx.out(), " ({},{}]", pgi.log_tail, pgi.last_update);
}
if (pgi.is_incomplete()) {
fmt::format_to(ctx.out(), " lb {}", pgi.last_backfill);
}
fmt::format_to(ctx.out(),
" local-lis/les={}/{}",
pgi.last_interval_started,
pgi.last_epoch_started);
return fmt::format_to(ctx.out(),
" n={} {})",
pgi.stats.stats.sum.num_objects,
pgi.history);
}
};
// snaps and snap-sets
template <>
struct fmt::formatter<SnapSet> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
auto it = ctx.begin();
if (it != ctx.end() && *it == 'D') {
verbose = true;
++it;
}
return it;
}
template <typename FormatContext>
auto format(const SnapSet& snps, FormatContext& ctx) const
{
if (verbose) {
// similar to SnapSet::dump()
fmt::format_to(ctx.out(),
"snaps{{{}: clns ({}): ",
snps.seq,
snps.clones.size());
for (auto cln : snps.clones) {
fmt::format_to(ctx.out(), "[{}: sz:", cln);
auto cs = snps.clone_size.find(cln);
if (cs != snps.clone_size.end()) {
fmt::format_to(ctx.out(), "{} ", cs->second);
} else {
fmt::format_to(ctx.out(), "??");
}
auto co = snps.clone_overlap.find(cln);
if (co != snps.clone_overlap.end()) {
fmt::format_to(ctx.out(), "olp:{} ", co->second);
} else {
fmt::format_to(ctx.out(), "olp:?? ");
}
auto cln_snps = snps.clone_snaps.find(cln);
if (cln_snps != snps.clone_snaps.end()) {
fmt::format_to(ctx.out(), "cl-snps:{} ]", cln_snps->second);
} else {
fmt::format_to(ctx.out(), "cl-snps:?? ]");
}
}
return fmt::format_to(ctx.out(), "}}");
} else {
return fmt::format_to(ctx.out(),
"{}={}:{}",
snps.seq,
snps.snaps,
snps.clone_snaps);
}
}
bool verbose{false};
};
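// Usage note (added for clarity): plain "{}" yields the compact
// "<seq>=<snaps>:<clone_snaps>" form, while "{:D}" selects the verbose
// clone-by-clone dump, e.g. fmt::format("{:D}", snapset).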
template <>
struct fmt::formatter<ScrubMap::object> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
  ///\todo: consider passing the 'D' flag to control snapset dump
template <typename FormatContext>
auto format(const ScrubMap::object& so, FormatContext& ctx) const
{
fmt::format_to(ctx.out(),
"so{{ sz:{} dd:{} od:{} ",
so.size,
so.digest,
so.digest_present);
// note the special handling of (1) OI_ATTR and (2) non-printables
for (auto [k, v] : so.attrs) {
std::string bkstr{v.raw_c_str(), v.raw_length()};
if (k == std::string{OI_ATTR}) {
/// \todo consider parsing the OI args here. Maybe add a specific format
/// specifier
fmt::format_to(ctx.out(), "{{{}:<<OI_ATTR>>({})}} ", k, bkstr.length());
} else if (k == std::string{SS_ATTR}) {
bufferlist bl;
bl.push_back(v);
SnapSet sns{bl};
fmt::format_to(ctx.out(), "{{{}:{:D}}} ", k, sns);
} else {
fmt::format_to(ctx.out(), "{{{}:{}({})}} ", k, bkstr, bkstr.length());
}
}
return fmt::format_to(ctx.out(), "}}");
}
};
template <>
struct fmt::formatter<ScrubMap> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
auto it = ctx.begin();
if (it != ctx.end() && *it == 'D') {
debug_log = true; // list the objects
++it;
}
return it;
}
template <typename FormatContext>
auto format(const ScrubMap& smap, FormatContext& ctx) const
{
fmt::format_to(ctx.out(),
"smap{{ valid:{} incr-since:{} #:{}",
smap.valid_through,
smap.incr_since,
smap.objects.size());
if (debug_log) {
fmt::format_to(ctx.out(), " objects:");
for (const auto& [ho, so] : smap.objects) {
fmt::format_to(ctx.out(), "\n\th.o<{}>:<{}> ", ho, so);
}
fmt::format_to(ctx.out(), "\n");
}
return fmt::format_to(ctx.out(), "}}");
}
bool debug_log{false};
};
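// Usage note (added for clarity): "{}" prints only the summary line, while
// "{:D}" additionally lists every object in the map (each one via the
// ScrubMap::object formatter above), e.g. fmt::format("{:D}", scrub_map).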
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<ObjectRecoveryInfo> : fmt::ostream_formatter {};
template <> struct fmt::formatter<ObjectRecoveryProgress> : fmt::ostream_formatter {};
template <> struct fmt::formatter<PastIntervals> : fmt::ostream_formatter {};
template <> struct fmt::formatter<pg_log_op_return_item_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<watch_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<pg_log_entry_t> : fmt::ostream_formatter {};
template <bool TrackChanges> struct fmt::formatter<pg_missing_set<TrackChanges>> : fmt::ostream_formatter {};
#endif
| 9,683 | 27.482353 | 109 | h |
null | ceph-main/src/osd/recovery_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include "osd_types.h"
/**
* BackfillInterval
*
* Represents the objects in a range [begin, end)
*
* Possible states:
 * 1) begin == end == hobject_t() indicates that the interval is unpopulated
* 2) Else, objects contains all objects in [begin, end)
*/
struct BackfillInterval {
// info about a backfill interval on a peer
eversion_t version; /// version at which the scan occurred
std::map<hobject_t,eversion_t> objects;
hobject_t begin;
hobject_t end;
/// clear content
void clear() {
*this = BackfillInterval();
}
  /// clear the objects map only
void clear_objects() {
objects.clear();
}
/// reinstantiate with a new start+end position and sort order
void reset(hobject_t start) {
clear();
begin = end = start;
}
/// true if there are no objects in this interval
bool empty() const {
return objects.empty();
}
/// true if interval extends to the end of the range
bool extends_to_end() const {
return end.is_max();
}
/// removes items <= soid and adjusts begin to the first object
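  /// (worked example, added for clarity: if objects holds {a, b, c} with
  /// a < b < c, then trim_to(b) erases a and b, leaving begin == c, i.e.
  /// the interval now describes [c, end))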
void trim_to(const hobject_t &soid) {
trim();
while (!objects.empty() &&
objects.begin()->first <= soid) {
pop_front();
}
}
/// Adjusts begin to the first object
void trim() {
if (!objects.empty())
begin = objects.begin()->first;
else
begin = end;
}
/// drop first entry, and adjust @begin accordingly
void pop_front() {
ceph_assert(!objects.empty());
objects.erase(objects.begin());
trim();
}
/// dump
void dump(ceph::Formatter *f) const {
f->dump_stream("begin") << begin;
f->dump_stream("end") << end;
f->open_array_section("objects");
for (std::map<hobject_t, eversion_t>::const_iterator i =
objects.begin();
i != objects.end();
++i) {
f->open_object_section("object");
f->dump_stream("object") << i->first;
f->dump_stream("version") << i->second;
f->close_section();
}
f->close_section();
}
};
std::ostream &operator<<(std::ostream &out, const BackfillInterval &bi);
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<BackfillInterval> : fmt::ostream_formatter {};
#endif
| 2,343 | 22.676768 | 80 | h |
null | ceph-main/src/osd/scrubber_common.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <fmt/ranges.h>
#include "common/scrub_types.h"
#include "include/types.h"
#include "os/ObjectStore.h"
#include "OpRequest.h"
namespace ceph {
class Formatter;
}
struct PGPool;
namespace Scrub {
class ReplicaReservations;
}
/// Facilitating scrub-related object access to private PG data
class ScrubberPasskey {
private:
friend class Scrub::ReplicaReservations;
friend class PrimaryLogScrub;
friend class PgScrubber;
friend class ScrubBackend;
ScrubberPasskey() {}
ScrubberPasskey(const ScrubberPasskey&) = default;
ScrubberPasskey& operator=(const ScrubberPasskey&) = delete;
};
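// Note on the idiom (added for clarity): PG methods meant to be callable
// only from scrub code take a ScrubberPasskey argument; since only the
// classes befriended above can construct one, a call such as
//   pg->force_object_missing(ScrubberPasskey{}, peers, oid, version);
// compiles inside PgScrubber/ScrubBackend but nowhere else.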
namespace Scrub {
/// high/low OP priority
enum class scrub_prio_t : bool { low_priority = false, high_priority = true };
/// Identifies a specific scrub activation within an interval,
/// see ScrubPgIF::m_current_token
using act_token_t = uint32_t;
/// "environment" preconditions affecting which PGs are eligible for scrubbing
struct ScrubPreconds {
bool allow_requested_repair_only{false};
bool load_is_low{true};
bool time_permit{true};
bool only_deadlined{false};
};
/// PG services used by the scrubber backend
struct PgScrubBeListener {
virtual ~PgScrubBeListener() = default;
virtual const PGPool& get_pgpool() const = 0;
virtual pg_shard_t get_primary() const = 0;
virtual void force_object_missing(ScrubberPasskey,
const std::set<pg_shard_t>& peer,
const hobject_t& oid,
eversion_t version) = 0;
virtual const pg_info_t& get_pg_info(ScrubberPasskey) const = 0;
// query the PG backend for the on-disk size of an object
virtual uint64_t logical_to_ondisk_size(uint64_t logical_size) const = 0;
  // used to verify our "cleanness" before scrubbing
virtual bool is_waiting_for_unreadable_object() const = 0;
};
} // namespace Scrub
/**
* Flags affecting the scheduling and behaviour of the *next* scrub.
*
* we hold two of these flag collections: one
* for the next scrub, and one frozen at initiation (i.e. in pg::queue_scrub())
*/
struct requested_scrub_t {
// flags to indicate explicitly requested scrubs (by admin):
// bool must_scrub, must_deep_scrub, must_repair, need_auto;
/**
* 'must_scrub' is set by an admin command (or by need_auto).
* Affects the priority of the scrubbing, and the sleep periods
* during the scrub.
*/
bool must_scrub{false};
/**
* scrub must not be aborted.
* Set for explicitly requested scrubs, and for scrubs originated by the
* pairing process with the 'repair' flag set (in the RequestScrub event).
*
* Will be copied into the 'required' scrub flag upon scrub start.
*/
bool req_scrub{false};
/**
* Set from:
   *  - scrub_requested() with the need_auto param set - which only happens
   *    in scrub_finish(), if deep_scrub_on_error is set and we have errors
*
* If set, will prevent the OSD from casually postponing our scrub. When
* scrubbing starts, will cause must_scrub, must_deep_scrub and auto_repair to
* be set.
*/
bool need_auto{false};
/**
* Set for scrub-after-recovery just before we initiate the recovery deep
   * scrub, or if scrub_requested() was called with either need_auto or repair.
* Affects PG_STATE_DEEP_SCRUB.
*/
bool must_deep_scrub{false};
/**
* (An intermediary flag used by pg::sched_scrub() on the first time
* a planned scrub has all its resources). Determines whether the next
* repair/scrub will be 'deep'.
*
* Note: 'dumped' by PgScrubber::dump() and such. In reality, being a
   * temporary that is set and reset by the same operation, it will never
   * appear externally to be set.
*/
bool time_for_deep{false};
bool deep_scrub_on_error{false};
/**
* If set, we should see must_deep_scrub & must_scrub, too
*
* - 'must_repair' is checked by the OSD when scheduling the scrubs.
* - also checked & cleared at pg::queue_scrub()
*/
bool must_repair{false};
/*
   * the value of auto_repair is determined in sched_scrub() (once per scrub;
   * the previous value is not remembered). Set if
* - allowed by configuration and backend, and
* - must_scrub is not set (i.e. - this is a periodic scrub),
* - time_for_deep was just set
*/
bool auto_repair{false};
/**
* indicating that we are scrubbing post repair to verify everything is fixed.
* Otherwise - PG_STATE_FAILED_REPAIR will be asserted.
*/
bool check_repair{false};
/**
* Used to indicate, both in client-facing listings and internally, that
* the planned scrub will be a deep one.
*/
bool calculated_to_deep{false};
};
std::ostream& operator<<(std::ostream& out, const requested_scrub_t& sf);
template <>
struct fmt::formatter<requested_scrub_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
  auto format(const requested_scrub_t& rs, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(),
"(plnd:{}{}{}{}{}{}{}{}{}{})",
rs.must_repair ? " must_repair" : "",
rs.auto_repair ? " auto_repair" : "",
rs.check_repair ? " check_repair" : "",
rs.deep_scrub_on_error ? " deep_scrub_on_error" : "",
rs.must_deep_scrub ? " must_deep_scrub" : "",
rs.must_scrub ? " must_scrub" : "",
rs.time_for_deep ? " time_for_deep" : "",
rs.need_auto ? " need_auto" : "",
rs.req_scrub ? " req_scrub" : "",
rs.calculated_to_deep ? " deep" : "");
}
};
/**
* The interface used by the PG when requesting scrub-related info or services
*/
struct ScrubPgIF {
virtual ~ScrubPgIF() = default;
friend std::ostream& operator<<(std::ostream& out, const ScrubPgIF& s)
{
return s.show(out);
}
virtual std::ostream& show(std::ostream& out) const = 0;
// --------------- triggering state-machine events:
virtual void initiate_regular_scrub(epoch_t epoch_queued) = 0;
virtual void initiate_scrub_after_repair(epoch_t epoch_queued) = 0;
virtual void send_scrub_resched(epoch_t epoch_queued) = 0;
virtual void active_pushes_notification(epoch_t epoch_queued) = 0;
virtual void update_applied_notification(epoch_t epoch_queued) = 0;
virtual void digest_update_notification(epoch_t epoch_queued) = 0;
virtual void send_scrub_unblock(epoch_t epoch_queued) = 0;
virtual void send_replica_maps_ready(epoch_t epoch_queued) = 0;
virtual void send_replica_pushes_upd(epoch_t epoch_queued) = 0;
virtual void send_start_replica(epoch_t epoch_queued,
Scrub::act_token_t token) = 0;
virtual void send_sched_replica(epoch_t epoch_queued,
Scrub::act_token_t token) = 0;
virtual void send_chunk_free(epoch_t epoch_queued) = 0;
virtual void send_chunk_busy(epoch_t epoch_queued) = 0;
virtual void send_local_map_done(epoch_t epoch_queued) = 0;
virtual void send_get_next_chunk(epoch_t epoch_queued) = 0;
virtual void send_scrub_is_finished(epoch_t epoch_queued) = 0;
virtual void on_applied_when_primary(const eversion_t& applied_version) = 0;
// --------------------------------------------------
  // currently only used for an assert
  [[nodiscard]] virtual bool are_callbacks_pending() const = 0;
/**
* the scrubber is marked 'active':
* - for the primary: when all replica OSDs grant us the requested resources
* - for replicas: upon receiving the scrub request from the primary
*/
[[nodiscard]] virtual bool is_scrub_active() const = 0;
/**
* 'true' until after the FSM processes the 'scrub-finished' event,
* and scrubbing is completely cleaned-up.
*
* In other words - holds longer than is_scrub_active(), thus preventing
* a rescrubbing of the same PG while the previous scrub has not fully
* terminated.
*/
[[nodiscard]] virtual bool is_queued_or_active() const = 0;
/**
   * Manipulate the Scrubber's 'a scrubbing request has been queued, or we
   * are actually scrubbing' flag
*
* clear_queued_or_active() will also restart any blocked snaptrimming.
*/
virtual void set_queued_or_active() = 0;
virtual void clear_queued_or_active() = 0;
  /// are we waiting for resource reservation grants from our replicas?
[[nodiscard]] virtual bool is_reserving() const = 0;
/// handle a message carrying a replica map
virtual void map_from_replica(OpRequestRef op) = 0;
virtual void replica_scrub_op(OpRequestRef op) = 0;
virtual void set_op_parameters(const requested_scrub_t&) = 0;
/// stop any active scrubbing (on interval end) and unregister from
/// the OSD scrub queue
virtual void on_new_interval() = 0;
virtual void scrub_clear_state() = 0;
virtual void handle_query_state(ceph::Formatter* f) = 0;
virtual pg_scrubbing_status_t get_schedule() const = 0;
virtual void dump_scrubber(ceph::Formatter* f,
const requested_scrub_t& request_flags) const = 0;
/**
* Return true if soid is currently being scrubbed and pending IOs should
* block. May have a side effect of preempting an in-progress scrub -- will
* return false in that case.
*
* @param soid object to check for ongoing scrub
* @return boolean whether a request on soid should block until scrub
* completion
*/
virtual bool write_blocked_by_scrub(const hobject_t& soid) = 0;
/// Returns whether any objects in the range [begin, end] are being scrubbed
virtual bool range_intersects_scrub(const hobject_t& start,
const hobject_t& end) = 0;
/// the op priority, taken from the primary's request message
virtual Scrub::scrub_prio_t replica_op_priority() const = 0;
/// the priority of the on-going scrub (used when requeuing events)
virtual unsigned int scrub_requeue_priority(
Scrub::scrub_prio_t with_priority) const = 0;
virtual unsigned int scrub_requeue_priority(
Scrub::scrub_prio_t with_priority,
unsigned int suggested_priority) const = 0;
virtual void add_callback(Context* context) = 0;
/// add to scrub statistics, but only if the soid is below the scrub start
virtual void stats_of_handled_objects(const object_stat_sum_t& delta_stats,
const hobject_t& soid) = 0;
/**
* the version of 'scrub_clear_state()' that does not try to invoke FSM
* services (thus can be called from FSM reactions)
*/
virtual void clear_pgscrub_state() = 0;
/**
* triggers the 'RemotesReserved' (all replicas granted scrub resources)
* state-machine event
*/
virtual void send_remotes_reserved(epoch_t epoch_queued) = 0;
/**
* triggers the 'ReservationFailure' (at least one replica denied us the
* requested resources) state-machine event
*/
virtual void send_reservation_failure(epoch_t epoch_queued) = 0;
virtual void cleanup_store(ObjectStore::Transaction* t) = 0;
virtual bool get_store_errors(const scrub_ls_arg_t& arg,
scrub_ls_result_t& res_inout) const = 0;
/**
* force a periodic 'publish_stats_to_osd()' call, to update scrub-related
* counters and statistics.
*/
virtual void update_scrub_stats(
ceph::coarse_real_clock::time_point now_is) = 0;
// --------------- reservations -----------------------------------
/**
* message all replicas with a request to "unreserve" scrub
*/
virtual void unreserve_replicas() = 0;
/**
* "forget" all replica reservations. No messages are sent to the
* previously-reserved.
*
* Used upon interval change. The replicas' state is guaranteed to
* be reset separately by the interval-change event.
*/
virtual void discard_replica_reservations() = 0;
/**
* clear both local and OSD-managed resource reservation flags
*/
virtual void clear_scrub_reservations() = 0;
/**
* Reserve local scrub resources (managed by the OSD)
*
* Fails if OSD's local-scrubs budget was exhausted
* \returns were local resources reserved?
*/
virtual bool reserve_local() = 0;
/**
* if activated as a Primary - register the scrub job with the OSD
* scrub queue
*/
virtual void on_pg_activate(const requested_scrub_t& request_flags) = 0;
/**
* Recalculate the required scrub time.
*
* This function assumes that the queue registration status is up-to-date,
   * i.e. the OSD "knows our name" iff we are the Primary.
*/
virtual void update_scrub_job(const requested_scrub_t& request_flags) = 0;
// on the replica:
virtual void handle_scrub_reserve_request(OpRequestRef op) = 0;
virtual void handle_scrub_reserve_release(OpRequestRef op) = 0;
// and on the primary:
virtual void handle_scrub_reserve_grant(OpRequestRef op, pg_shard_t from) = 0;
virtual void handle_scrub_reserve_reject(OpRequestRef op,
pg_shard_t from) = 0;
virtual void rm_from_osd_scrubbing() = 0;
virtual void scrub_requested(scrub_level_t scrub_level,
scrub_type_t scrub_type,
requested_scrub_t& req_flags) = 0;
// --------------- debugging via the asok ------------------------------
virtual int asok_debug(std::string_view cmd,
std::string param,
Formatter* f,
std::stringstream& ss) = 0;
};
| 13,461 | 31.052381 | 80 | h |
null | ceph-main/src/osd/scheduler/OpScheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <ostream>
#include <variant>
#include "common/ceph_context.h"
#include "mon/MonClient.h"
#include "osd/scheduler/OpSchedulerItem.h"
namespace ceph::osd::scheduler {
using client = uint64_t;
using WorkItem = std::variant<std::monostate, OpSchedulerItem, double>;
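// Clarifying note (not in the original): dequeue() hands back one of the
// three alternatives - std::monostate when there is nothing to return, an
// OpSchedulerItem that is ready to run, or (for the mClock scheduler) a
// double holding the time at which the next item is expected to become
// eligible, so the caller knows how long to wait.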
/**
* Base interface for classes responsible for choosing
* op processing order in the OSD.
*/
class OpScheduler {
public:
// Enqueue op for scheduling
virtual void enqueue(OpSchedulerItem &&item) = 0;
// Enqueue op for processing as though it were enqueued prior
// to other items already scheduled.
virtual void enqueue_front(OpSchedulerItem &&item) = 0;
// Returns true iff there are no ops scheduled
virtual bool empty() const = 0;
// Return next op to be processed
virtual WorkItem dequeue() = 0;
// Dump formatted representation for the queue
virtual void dump(ceph::Formatter &f) const = 0;
// Print human readable brief description with relevant parameters
virtual void print(std::ostream &out) const = 0;
// Apply config changes to the scheduler (if any)
virtual void update_configuration() = 0;
// Destructor
virtual ~OpScheduler() {};
};
std::ostream &operator<<(std::ostream &lhs, const OpScheduler &);
using OpSchedulerRef = std::unique_ptr<OpScheduler>;
OpSchedulerRef make_scheduler(
CephContext *cct, int whoami, uint32_t num_shards, int shard_id,
bool is_rotational, std::string_view osd_objectstore, MonClient *monc);
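// Usage sketch (illustrative; the real call sites are in the OSD's op
// shards, and "bluestore" is only an example objectstore name):
//   OpSchedulerRef sched = make_scheduler(
//     cct, whoami, num_shards, shard_id, is_rotational, "bluestore", monc);
//   sched->enqueue(std::move(item));
//   WorkItem w = sched->dequeue();
//   if (auto *next = std::get_if<OpSchedulerItem>(&w)) { /* run *next */ }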
/**
* Implements OpScheduler in terms of OpQueue
*
* Templated on queue type to avoid dynamic dispatch, T should implement
* OpQueue<OpSchedulerItem, client>. This adapter is mainly responsible for
* the boilerplate priority cutoff/strict concept which is needed for
* OpQueue based implementations.
*/
template <typename T>
class ClassedOpQueueScheduler final : public OpScheduler {
unsigned cutoff;
T queue;
static unsigned int get_io_prio_cut(CephContext *cct) {
if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
srand(time(NULL));
return (rand() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
} else if (cct->_conf->osd_op_queue_cut_off == "high") {
return CEPH_MSG_PRIO_HIGH;
} else {
// default / catch-all is 'low'
return CEPH_MSG_PRIO_LOW;
}
}
public:
template <typename... Args>
ClassedOpQueueScheduler(CephContext *cct, Args&&... args) :
cutoff(get_io_prio_cut(cct)),
queue(std::forward<Args>(args)...)
{}
void enqueue(OpSchedulerItem &&item) final {
unsigned priority = item.get_priority();
unsigned cost = item.get_cost();
if (priority >= cutoff)
queue.enqueue_strict(
item.get_owner(), priority, std::move(item));
else
queue.enqueue(
item.get_owner(), priority, cost, std::move(item));
}
void enqueue_front(OpSchedulerItem &&item) final {
unsigned priority = item.get_priority();
unsigned cost = item.get_cost();
if (priority >= cutoff)
queue.enqueue_strict_front(
item.get_owner(),
priority, std::move(item));
else
queue.enqueue_front(
item.get_owner(),
priority, cost, std::move(item));
}
bool empty() const final {
return queue.empty();
}
WorkItem dequeue() final {
return queue.dequeue();
}
void dump(ceph::Formatter &f) const final {
return queue.dump(&f);
}
void print(std::ostream &out) const final {
out << "ClassedOpQueueScheduler(queue=";
queue.print(out);
out << ", cutoff=" << cutoff << ")";
}
void update_configuration() final {
// no-op
}
~ClassedOpQueueScheduler() final {};
};
}
| 4,007 | 25.72 | 76 | h |
null | ceph-main/src/osd/scheduler/OpSchedulerItem.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <ostream>
#include "include/types.h"
#include "include/utime_fmt.h"
#include "osd/osd_types_fmt.h"
#include "osd/OpRequest.h"
#include "osd/PG.h"
#include "osd/PGPeeringEvent.h"
#include "messages/MOSDOp.h"
class OSD;
struct OSDShard;
namespace ceph::osd::scheduler {
enum class op_scheduler_class : uint8_t {
background_recovery = 0,
background_best_effort,
immediate,
client,
};
std::ostream& operator<<(std::ostream& out, const op_scheduler_class& class_id);
class OpSchedulerItem {
public:
// Abstraction for operations queueable in the op queue
class OpQueueable {
public:
using Ref = std::unique_ptr<OpQueueable>;
/// Items with the same queue token will end up in the same shard
virtual uint32_t get_queue_token() const = 0;
/* Items will be dequeued and locked atomically w.r.t. other items with the
* same ordering token */
virtual const spg_t& get_ordering_token() const = 0;
virtual std::optional<OpRequestRef> maybe_get_op() const {
return std::nullopt;
}
virtual uint64_t get_reserved_pushes() const {
return 0;
}
virtual bool is_peering() const {
return false;
}
virtual bool peering_requires_pg() const {
ceph_abort();
}
virtual const PGCreateInfo *creates_pg() const {
return nullptr;
}
virtual std::ostream &print(std::ostream &rhs) const = 0;
/// and a version geared towards fmt::format use:
virtual std::string print() const = 0;
virtual void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) = 0;
virtual op_scheduler_class get_scheduler_class() const = 0;
virtual ~OpQueueable() {}
friend std::ostream& operator<<(std::ostream& out, const OpQueueable& q) {
return q.print(out);
}
};
private:
OpQueueable::Ref qitem;
int cost;
unsigned priority;
utime_t start_time;
uint64_t owner; ///< global id (e.g., client.XXX)
epoch_t map_epoch; ///< an epoch we expect the PG to exist in
/**
* qos_cost
*
* Set by mClockScheduler iff queued into mclock proper and not the
* high/immediate queues. Represents mClockScheduler's adjusted
* cost value.
*/
uint32_t qos_cost = 0;
/// True iff queued via mclock proper, not the high/immediate queues
bool was_queued_via_mclock() const {
return qos_cost > 0;
}
public:
OpSchedulerItem(
OpQueueable::Ref &&item,
int cost,
unsigned priority,
utime_t start_time,
uint64_t owner,
epoch_t e)
: qitem(std::move(item)),
cost(cost),
priority(priority),
start_time(start_time),
owner(owner),
map_epoch(e) {}
OpSchedulerItem(OpSchedulerItem &&) = default;
OpSchedulerItem(const OpSchedulerItem &) = delete;
OpSchedulerItem &operator=(OpSchedulerItem &&) = default;
OpSchedulerItem &operator=(const OpSchedulerItem &) = delete;
friend struct fmt::formatter<OpSchedulerItem>;
uint32_t get_queue_token() const {
return qitem->get_queue_token();
}
const spg_t& get_ordering_token() const {
return qitem->get_ordering_token();
}
std::optional<OpRequestRef> maybe_get_op() const {
return qitem->maybe_get_op();
}
uint64_t get_reserved_pushes() const {
return qitem->get_reserved_pushes();
}
void run(OSD *osd, OSDShard *sdata,PGRef& pg, ThreadPool::TPHandle &handle) {
qitem->run(osd, sdata, pg, handle);
}
unsigned get_priority() const { return priority; }
int get_cost() const { return cost; }
utime_t get_start_time() const { return start_time; }
uint64_t get_owner() const { return owner; }
epoch_t get_map_epoch() const { return map_epoch; }
bool is_peering() const {
return qitem->is_peering();
}
const PGCreateInfo *creates_pg() const {
return qitem->creates_pg();
}
bool peering_requires_pg() const {
return qitem->peering_requires_pg();
}
op_scheduler_class get_scheduler_class() const {
return qitem->get_scheduler_class();
}
void set_qos_cost(uint32_t scaled_cost) {
qos_cost = scaled_cost;
}
friend std::ostream& operator<<(std::ostream& out, const OpSchedulerItem& item) {
out << "OpSchedulerItem("
<< item.get_ordering_token() << " " << *item.qitem;
out << " class_id " << item.get_scheduler_class();
out << " prio " << item.get_priority();
if (item.was_queued_via_mclock()) {
out << " qos_cost " << item.qos_cost;
}
out << " cost " << item.get_cost()
<< " e" << item.get_map_epoch();
if (item.get_reserved_pushes()) {
out << " reserved_pushes " << item.get_reserved_pushes();
}
return out << ")";
}
}; // class OpSchedulerItem
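// Construction sketch (illustrative; the actual call sites live in the OSD):
// a client op is typically wrapped as
//   OpSchedulerItem(
//     std::make_unique<PGOpItem>(pgid, std::move(op)),
//     cost, priority, ceph_clock_now(), owner_gid, map_epoch);
// and handed to the shard's OpScheduler via enqueue().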
/// Implements boilerplate for operations queued for the pg lock
class PGOpQueueable : public OpSchedulerItem::OpQueueable {
spg_t pgid;
protected:
const spg_t& get_pgid() const {
return pgid;
}
static op_scheduler_class priority_to_scheduler_class(int priority) {
if (priority >= CEPH_MSG_PRIO_HIGH) {
return op_scheduler_class::immediate;
} else if (priority >= PeeringState::recovery_msg_priority_t::DEGRADED) {
return op_scheduler_class::background_recovery;
} else {
return op_scheduler_class::background_best_effort;
}
}
public:
explicit PGOpQueueable(spg_t pg) : pgid(pg) {}
uint32_t get_queue_token() const final {
return get_pgid().ps();
}
const spg_t& get_ordering_token() const final {
return get_pgid();
}
};
class PGOpItem : public PGOpQueueable {
OpRequestRef op;
public:
PGOpItem(spg_t pg, OpRequestRef op) : PGOpQueueable(pg), op(std::move(op)) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGOpItem(op=" << *(op->get_req()) << ")";
}
std::string print() const override {
return fmt::format("PGOpItem(op={})", *(op->get_req()));
}
std::optional<OpRequestRef> maybe_get_op() const final {
return op;
}
op_scheduler_class get_scheduler_class() const final {
auto type = op->get_req()->get_type();
if (type == CEPH_MSG_OSD_OP ||
type == CEPH_MSG_OSD_BACKOFF) {
return op_scheduler_class::client;
} else {
return op_scheduler_class::immediate;
}
}
void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
};
class PGPeeringItem : public PGOpQueueable {
PGPeeringEventRef evt;
public:
PGPeeringItem(spg_t pg, PGPeeringEventRef e) : PGOpQueueable(pg), evt(e) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGPeeringEvent(" << evt->get_desc() << ")";
}
std::string print() const final {
return fmt::format("PGPeeringEvent({})", evt->get_desc());
}
void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
bool is_peering() const override {
return true;
}
bool peering_requires_pg() const override {
return evt->requires_pg;
}
const PGCreateInfo *creates_pg() const override {
return evt->create_info.get();
}
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::immediate;
}
};
class PGSnapTrim : public PGOpQueueable {
epoch_t epoch_queued;
public:
PGSnapTrim(
spg_t pg,
epoch_t epoch_queued)
: PGOpQueueable(pg), epoch_queued(epoch_queued) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGSnapTrim(pgid=" << get_pgid()
<< " epoch_queued=" << epoch_queued
<< ")";
}
std::string print() const final {
return fmt::format(
"PGSnapTrim(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::background_best_effort;
}
};
class PGScrub : public PGOpQueueable {
epoch_t epoch_queued;
public:
PGScrub(
spg_t pg,
epoch_t epoch_queued)
: PGOpQueueable(pg), epoch_queued(epoch_queued) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGScrub(pgid=" << get_pgid()
<< "epoch_queued=" << epoch_queued
<< ")";
}
std::string print() const final {
return fmt::format(
"PGScrub(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::background_best_effort;
}
};
class PGScrubItem : public PGOpQueueable {
protected:
epoch_t epoch_queued;
Scrub::act_token_t activation_index;
std::string_view message_name;
PGScrubItem(spg_t pg, epoch_t epoch_queued, std::string_view derivative_name)
: PGOpQueueable{pg}
, epoch_queued{epoch_queued}
, activation_index{0}
, message_name{derivative_name}
{}
PGScrubItem(spg_t pg,
epoch_t epoch_queued,
Scrub::act_token_t op_index,
std::string_view derivative_name)
: PGOpQueueable{pg}
, epoch_queued{epoch_queued}
, activation_index{op_index}
, message_name{derivative_name}
{}
std::ostream& print(std::ostream& rhs) const final
{
return rhs << message_name << "(pgid=" << get_pgid()
<< "epoch_queued=" << epoch_queued
<< " scrub-token=" << activation_index << ")";
}
std::string print() const override {
return fmt::format(
"{}(pgid={} epoch_queued={} scrub-token={})", message_name, get_pgid(),
epoch_queued, activation_index);
}
void run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle) override = 0;
op_scheduler_class get_scheduler_class() const final
{
return op_scheduler_class::background_best_effort;
}
};
class PGScrubResched : public PGScrubItem {
public:
PGScrubResched(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubResched"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
/**
* all replicas have granted our scrub resources request
*/
class PGScrubResourcesOK : public PGScrubItem {
public:
PGScrubResourcesOK(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubResourcesOK"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
/**
* scrub resources requests denied by replica(s)
*/
class PGScrubDenied : public PGScrubItem {
public:
PGScrubDenied(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubDenied"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
/**
* called when a repair process completes, to initiate scrubbing. No local/remote
* resources are allocated.
*/
class PGScrubAfterRepair : public PGScrubItem {
public:
PGScrubAfterRepair(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubAfterRepair"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubPushesUpdate : public PGScrubItem {
public:
PGScrubPushesUpdate(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubPushesUpdate"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubAppliedUpdate : public PGScrubItem {
public:
PGScrubAppliedUpdate(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubAppliedUpdate"}
{}
void run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
[[maybe_unused]] ThreadPool::TPHandle& handle) final;
};
class PGScrubUnblocked : public PGScrubItem {
public:
PGScrubUnblocked(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubUnblocked"}
{}
void run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
[[maybe_unused]] ThreadPool::TPHandle& handle) final;
};
class PGScrubDigestUpdate : public PGScrubItem {
public:
PGScrubDigestUpdate(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubDigestUpdate"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubGotLocalMap : public PGScrubItem {
public:
PGScrubGotLocalMap(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubGotLocalMap"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubGotReplMaps : public PGScrubItem {
public:
PGScrubGotReplMaps(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubGotReplMaps"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGRepScrub : public PGScrubItem {
public:
PGRepScrub(spg_t pg, epoch_t epoch_queued, Scrub::act_token_t op_token)
: PGScrubItem{pg, epoch_queued, op_token, "PGRepScrub"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGRepScrubResched : public PGScrubItem {
public:
PGRepScrubResched(spg_t pg, epoch_t epoch_queued, Scrub::act_token_t op_token)
: PGScrubItem{pg, epoch_queued, op_token, "PGRepScrubResched"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubReplicaPushes : public PGScrubItem {
public:
PGScrubReplicaPushes(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubReplicaPushes"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubScrubFinished : public PGScrubItem {
public:
PGScrubScrubFinished(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubScrubFinished"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubGetNextChunk : public PGScrubItem {
public:
PGScrubGetNextChunk(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubGetNextChunk"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubChunkIsBusy : public PGScrubItem {
public:
PGScrubChunkIsBusy(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubChunkIsBusy"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubChunkIsFree : public PGScrubItem {
public:
PGScrubChunkIsFree(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubChunkIsFree"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGRecovery : public PGOpQueueable {
utime_t time_queued;
epoch_t epoch_queued;
uint64_t reserved_pushes;
int priority;
public:
PGRecovery(
spg_t pg,
epoch_t epoch_queued,
uint64_t reserved_pushes,
int priority)
: PGOpQueueable(pg),
time_queued(ceph_clock_now()),
epoch_queued(epoch_queued),
reserved_pushes(reserved_pushes),
priority(priority) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGRecovery(pgid=" << get_pgid()
<< " epoch_queued=" << epoch_queued
<< " reserved_pushes=" << reserved_pushes
<< ")";
}
std::string print() const final {
return fmt::format(
"PGRecovery(pgid={} epoch_queued={} reserved_pushes={})", get_pgid(),
epoch_queued, reserved_pushes);
}
uint64_t get_reserved_pushes() const final {
return reserved_pushes;
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return priority_to_scheduler_class(priority);
}
};
class PGRecoveryContext : public PGOpQueueable {
utime_t time_queued;
std::unique_ptr<GenContext<ThreadPool::TPHandle&>> c;
epoch_t epoch;
int priority;
public:
PGRecoveryContext(spg_t pgid,
GenContext<ThreadPool::TPHandle&> *c, epoch_t epoch,
int priority)
: PGOpQueueable(pgid),
time_queued(ceph_clock_now()),
c(c), epoch(epoch), priority(priority) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGRecoveryContext(pgid=" << get_pgid()
<< " c=" << c.get() << " epoch=" << epoch
<< ")";
}
std::string print() const final {
return fmt::format(
"PGRecoveryContext(pgid={} c={} epoch={})", get_pgid(), (void*)c.get(), epoch);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return priority_to_scheduler_class(priority);
}
};
class PGDelete : public PGOpQueueable {
epoch_t epoch_queued;
public:
PGDelete(
spg_t pg,
epoch_t epoch_queued)
: PGOpQueueable(pg),
epoch_queued(epoch_queued) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGDelete(" << get_pgid()
<< " e" << epoch_queued
<< ")";
}
std::string print() const final {
return fmt::format(
"PGDelete(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::background_best_effort;
}
};
class PGRecoveryMsg : public PGOpQueueable {
utime_t time_queued;
OpRequestRef op;
public:
PGRecoveryMsg(spg_t pg, OpRequestRef op)
: PGOpQueueable(pg), time_queued(ceph_clock_now()), op(std::move(op)) {}
static bool is_recovery_msg(OpRequestRef &op) {
switch (op->get_req()->get_type()) {
case MSG_OSD_PG_PUSH:
case MSG_OSD_PG_PUSH_REPLY:
case MSG_OSD_PG_PULL:
case MSG_OSD_PG_BACKFILL:
case MSG_OSD_PG_BACKFILL_REMOVE:
case MSG_OSD_PG_SCAN:
return true;
default:
return false;
}
}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGRecoveryMsg(op=" << *(op->get_req()) << ")";
}
std::string print() const final {
return fmt::format("PGRecoveryMsg(op={})", *(op->get_req()));
}
std::optional<OpRequestRef> maybe_get_op() const final {
return op;
}
op_scheduler_class get_scheduler_class() const final {
return priority_to_scheduler_class(op->get_req()->get_priority());
}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle)
final;
};
} // namespace ceph::osd::scheduler
template <>
struct fmt::formatter<ceph::osd::scheduler::OpSchedulerItem> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(
const ceph::osd::scheduler::OpSchedulerItem& opsi,
FormatContext& ctx) const
{
// matching existing op_scheduler_item_t::operator<<() format
using class_t =
std::underlying_type_t<ceph::osd::scheduler::op_scheduler_class>;
const auto qos_cost = opsi.was_queued_via_mclock()
? fmt::format(" qos_cost {}", opsi.qos_cost)
: "";
const auto pushes =
opsi.get_reserved_pushes()
? fmt::format(" reserved_pushes {}", opsi.get_reserved_pushes())
: "";
return fmt::format_to(
ctx.out(), "OpSchedulerItem({} {} class_id {} prio {}{} cost {} e{}{})",
opsi.get_ordering_token(), opsi.qitem->print(),
static_cast<class_t>(opsi.get_scheduler_class()), opsi.get_priority(),
qos_cost, opsi.get_cost(), opsi.get_map_epoch(), pushes);
}
};
| 19,776 | 28.126657 | 93 | h |
null | ceph-main/src/osd/scheduler/mClockScheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <functional>
#include <ostream>
#include <map>
#include <vector>
#include "boost/variant.hpp"
#include "dmclock/src/dmclock_server.h"
#include "osd/scheduler/OpScheduler.h"
#include "common/config.h"
#include "common/ceph_context.h"
#include "common/mClockPriorityQueue.h"
#include "osd/scheduler/OpSchedulerItem.h"
namespace ceph::osd::scheduler {
constexpr double default_min = 0.0;
constexpr double default_max = std::numeric_limits<double>::is_iec559 ?
std::numeric_limits<double>::infinity() :
std::numeric_limits<double>::max();
using client_id_t = uint64_t;
using profile_id_t = uint64_t;
struct client_profile_id_t {
client_id_t client_id;
profile_id_t profile_id;
auto operator<=>(const client_profile_id_t&) const = default;
friend std::ostream& operator<<(std::ostream& out,
const client_profile_id_t& client_profile) {
out << " client_id: " << client_profile.client_id
<< " profile_id: " << client_profile.profile_id;
return out;
}
};
struct scheduler_id_t {
op_scheduler_class class_id;
client_profile_id_t client_profile_id;
auto operator<=>(const scheduler_id_t&) const = default;
friend std::ostream& operator<<(std::ostream& out,
const scheduler_id_t& sched_id) {
out << "{ class_id: " << sched_id.class_id
<< sched_id.client_profile_id;
return out << " }";
}
};
/**
* Scheduler implementation based on mclock.
*
* TODO: explain configs
*/
class mClockScheduler : public OpScheduler, md_config_obs_t {
CephContext *cct;
const int whoami;
const uint32_t num_shards;
const int shard_id;
const bool is_rotational;
MonClient *monc;
/**
* osd_bandwidth_cost_per_io
*
* mClock expects all queued items to have a uniform expression of
* "cost". However, IO devices generally have quite different capacity
* for sequential IO vs small random IO. This implementation handles this
   * by expressing all costs as a number of sequential bytes written, adding
* additional cost for each random IO equal to osd_bandwidth_cost_per_io.
*
* Thus, an IO operation requiring a total of <size> bytes to be written
   * across <iops> different locations will have a cost of
* <size> + (osd_bandwidth_cost_per_io * <iops>) bytes.
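   *
   * (illustrative arithmetic, not a default value: with
   * osd_bandwidth_cost_per_io = 500000 bytes/io, a single 4096-byte write
   * touching one location would be charged 4096 + 500000 * 1 = 504096
   * bytes of cost)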
*
* Set in set_osd_capacity_params_from_config in the constructor and upon
* config change.
*
* Has units bytes/io.
*/
double osd_bandwidth_cost_per_io;
/**
* osd_bandwidth_capacity_per_shard
*
   * mClock expects reservation and limit parameters to be expressed in units
* of cost/second -- which means bytes/second for this implementation.
*
* Rather than expecting users to compute appropriate limit and reservation
* values for each class of OSDs in their cluster, we instead express
   * reservation and limit parameters as ratios of the OSD's maximum capacity.
* osd_bandwidth_capacity_per_shard is that capacity divided by the number
* of shards.
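   *
   * (illustrative only: an OSD whose derived capacity works out to
   * 1000 MB/s and which runs 5 op shards would use 200 MB/s per shard)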
*
* Set in set_osd_capacity_params_from_config in the constructor and upon
* config change.
*
* This value gets passed to ClientRegistry::update_from_config in order
   * to resolve the full reservation and limit parameters for mclock from
* the configured ratios.
*
* Has units bytes/second.
*/
double osd_bandwidth_capacity_per_shard;
class ClientRegistry {
std::array<
crimson::dmclock::ClientInfo,
static_cast<size_t>(op_scheduler_class::immediate)
> internal_client_infos = {
// Placeholder, gets replaced with configured values
crimson::dmclock::ClientInfo(1, 1, 1),
crimson::dmclock::ClientInfo(1, 1, 1)
};
crimson::dmclock::ClientInfo default_external_client_info = {1, 1, 1};
std::map<client_profile_id_t,
crimson::dmclock::ClientInfo> external_client_infos;
const crimson::dmclock::ClientInfo *get_external_client(
const client_profile_id_t &client) const;
public:
/**
* update_from_config
*
     * Sets the mclock parameters (reservation, weight, and limit)
* for each class of IO (background_recovery, background_best_effort,
* and client).
*/
void update_from_config(
const ConfigProxy &conf,
double capacity_per_shard);
const crimson::dmclock::ClientInfo *get_info(
const scheduler_id_t &id) const;
} client_registry;
using mclock_queue_t = crimson::dmclock::PullPriorityQueue<
scheduler_id_t,
OpSchedulerItem,
true,
true,
2>;
using priority_t = unsigned;
using SubQueue = std::map<priority_t,
std::list<OpSchedulerItem>,
std::greater<priority_t>>;
mclock_queue_t scheduler;
/**
* high_priority
*
* Holds entries to be dequeued in strict order ahead of mClock
* Invariant: entries are never empty
*/
SubQueue high_priority;
priority_t immediate_class_priority = std::numeric_limits<priority_t>::max();
static scheduler_id_t get_scheduler_id(const OpSchedulerItem &item) {
return scheduler_id_t{
item.get_scheduler_class(),
client_profile_id_t{
item.get_owner(),
0
}
};
}
static unsigned int get_io_prio_cut(CephContext *cct) {
if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
std::random_device rd;
std::mt19937 random_gen(rd());
return (random_gen() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
} else if (cct->_conf->osd_op_queue_cut_off == "high") {
return CEPH_MSG_PRIO_HIGH;
} else {
// default / catch-all is 'low'
return CEPH_MSG_PRIO_LOW;
}
}
unsigned cutoff_priority = get_io_prio_cut(cct);
/**
* set_osd_capacity_params_from_config
*
* mClockScheduler uses two parameters, osd_bandwidth_cost_per_io
* and osd_bandwidth_capacity_per_shard, internally. These two
* parameters are derived from config parameters
* osd_mclock_max_capacity_iops_(hdd|ssd) and
* osd_mclock_max_sequential_bandwidth_(hdd|ssd) as well as num_shards.
* Invoking set_osd_capacity_params_from_config() resets those derived
* params based on the current config and should be invoked any time they
* are modified as well as in the constructor. See handle_conf_change().
*/
void set_osd_capacity_params_from_config();
// Set the mclock related config params based on the profile
void set_config_defaults_from_profile();
public:
mClockScheduler(CephContext *cct, int whoami, uint32_t num_shards,
int shard_id, bool is_rotational, MonClient *monc);
~mClockScheduler() override;
/// Calculate scaled cost per item
uint32_t calc_scaled_cost(int cost);
// Helper method to display mclock queues
std::string display_queues() const;
// Enqueue op in the back of the regular queue
void enqueue(OpSchedulerItem &&item) final;
// Enqueue the op in the front of the high priority queue
void enqueue_front(OpSchedulerItem &&item) final;
  // Return an op to be dispatched
WorkItem dequeue() final;
  // Returns whether the queue is empty
bool empty() const final {
return scheduler.empty() && high_priority.empty();
}
// Formatted output of the queue
void dump(ceph::Formatter &f) const final;
void print(std::ostream &ostream) const final {
ostream << "mClockScheduler";
}
// Update data associated with the modified mclock config key(s)
void update_configuration() final;
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) final;
private:
// Enqueue the op to the high priority queue
void enqueue_high(unsigned prio, OpSchedulerItem &&item, bool front = false);
};
}
| 8,102 | 29.809886 | 79 | h |
null | ceph-main/src/osd/scrubber/PrimaryLogScrub.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
// the './' includes are marked this way to affect clang-format
#include "./pg_scrubber.h"
#include "debug.h"
#include "common/errno.h"
#include "common/scrub_types.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDRepScrub.h"
#include "messages/MOSDRepScrubMap.h"
#include "messages/MOSDScrubReserve.h"
#include "osd/OSD.h"
#include "scrub_machine.h"
class PrimaryLogPG;
/**
* The derivative of PgScrubber that is used by PrimaryLogPG.
*/
class PrimaryLogScrub : public PgScrubber {
public:
explicit PrimaryLogScrub(PrimaryLogPG* pg);
void _scrub_finish() final;
bool get_store_errors(const scrub_ls_arg_t& arg,
scrub_ls_result_t& res_inout) const final;
void stats_of_handled_objects(const object_stat_sum_t& delta_stats,
const hobject_t& soid) final;
// the interface used by the scrubber-backend:
void add_to_stats(const object_stat_sum_t& stat) final;
void submit_digest_fixes(const digests_fixes_t& fixes) final;
private:
// we know our PG is actually a PrimaryLogPG. Let's alias the pointer to that
// object:
PrimaryLogPG* const m_pl_pg;
// handle our part in stats collection
object_stat_collection_t m_scrub_cstat;
void _scrub_clear_state() final; // which just clears the stats
};
| 1,360 | 25.173077 | 79 | h |
null | ceph-main/src/osd/scrubber/ScrubStore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_SCRUB_RESULT_H
#define CEPH_SCRUB_RESULT_H
#include "common/map_cacher.hpp"
#include "osd/SnapMapper.h" // for OSDriver
namespace librados {
struct object_id_t;
}
struct inconsistent_obj_wrapper;
struct inconsistent_snapset_wrapper;
namespace Scrub {
class Store {
public:
~Store();
static Store* create(ObjectStore* store,
ObjectStore::Transaction* t,
const spg_t& pgid,
const coll_t& coll);
void add_object_error(int64_t pool, const inconsistent_obj_wrapper& e);
void add_snap_error(int64_t pool, const inconsistent_snapset_wrapper& e);
// and a variant-friendly interface:
void add_error(int64_t pool, const inconsistent_obj_wrapper& e);
void add_error(int64_t pool, const inconsistent_snapset_wrapper& e);
bool empty() const;
void flush(ObjectStore::Transaction*);
void cleanup(ObjectStore::Transaction*);
std::vector<ceph::buffer::list> get_snap_errors(
int64_t pool,
const librados::object_id_t& start,
uint64_t max_return) const;
std::vector<ceph::buffer::list> get_object_errors(
int64_t pool,
const librados::object_id_t& start,
uint64_t max_return) const;
private:
Store(const coll_t& coll, const ghobject_t& oid, ObjectStore* store);
std::vector<ceph::buffer::list> get_errors(const std::string& start,
const std::string& end,
uint64_t max_return) const;
private:
const coll_t coll;
const ghobject_t hoid;
// a temp object holding mappings from seq-id to inconsistencies found in
// scrubbing
OSDriver driver;
mutable MapCacher::MapCacher<std::string, ceph::buffer::list> backend;
std::map<std::string, ceph::buffer::list> results;
};
} // namespace Scrub
#endif // CEPH_SCRUB_RESULT_H
| 1,838 | 27.734375 | 75 | h |
null | ceph-main/src/osd/scrubber/osd_scrub_sched.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
// clang-format off
/*
┌───────────────────────┐
│ OSD │
│ OSDService ─┼───┐
│ │ │
│ │ │
└───────────────────────┘ │ Ownes & uses the following
│ ScrubQueue interfaces:
│
│
│ - resource management (*1)
│
│ - environment conditions (*2)
│
│ - scrub scheduling (*3)
│
│
│
│
│
│
ScrubQueue │
┌───────────────────────────▼────────────┐
│ │
│ │
│ ScrubQContainer to_scrub <>────────┼────────┐
│ ScrubQContainer penalized │ │
│ │ │
│ │ │
│ OSD_wide resource counters │ │
│ │ │
│ │ │
│ "env scrub conditions" monitoring │ │
│ │ │
│ │ │
│ │ │
│ │ │
└─▲──────────────────────────────────────┘ │
│ │
│ │
│uses interface <4> │
│ │
│ │
│ ┌──────────────────────────────────┘
│ │ shared ownership of jobs
│ │
│ ┌─────▼──────┐
│ │ScrubJob │
│ │ ├┐
│ │ ││
│ │ │┼┐
│ │ │┼│
└──────┤ │┼┤◄──────┐
│ │┼│ │
│ │┼│ │
│ │┼│ │
└┬───────────┼┼│ │shared ownership
└─┼┼┼┼┼┼┼┼┼┼┼┼│ │
└───────────┘ │
│
│
│
│
┌───────────────────────────────┼─┐
│ <>│
│PgScrubber │
│ │
│ │
│ │
│ │
│ │
└─────────────────────────────────┘
ScrubQueue interfaces (main functions):
<1> - OSD/PG resources management:
- can_inc_scrubs()
- {inc/dec}_scrubs_{local/remote}()
- dump_scrub_reservations()
- {set/clear/is}_reserving_now()
<2> - environment conditions:
- update_loadavg()
- scrub_load_below_threshold()
- scrub_time_permit()
<3> - scheduling scrubs:
- select_pg_and_scrub()
- dump_scrubs()
<4> - manipulating a job's state:
- register_with_osd()
- remove_from_osd_queue()
- update_job()
*/
// clang-format on
#include <atomic>
#include <chrono>
#include <memory>
#include <optional>
#include <vector>
#include "common/RefCountedObj.h"
#include "common/ceph_atomic.h"
#include "osd/osd_types.h"
#include "osd/scrubber_common.h"
#include "include/utime_fmt.h"
#include "osd/osd_types_fmt.h"
#include "utime.h"
class PG;
namespace Scrub {
using namespace ::std::literals;
// possible outcome when trying to select a PG and scrub it
enum class schedule_result_t {
scrub_initiated, // successfully started a scrub
none_ready, // no pg to scrub
no_local_resources, // failure to secure local OSD scrub resource
already_started, // failed, as already started scrubbing this pg
no_such_pg, // can't find this pg
bad_pg_state, // pg state (clean, active, etc.)
preconditions // time, configuration, etc.
};
// the OSD services provided to the scrub scheduler
class ScrubSchedListener {
public:
virtual int get_nodeid() const = 0; // returns the OSD number ('whoami')
/**
* A callback used by the ScrubQueue object to initiate a scrub on a specific
* PG.
*
   * The request might fail for multiple reasons, as ScrubQueue cannot on its
   * own check some of the PG-specific preconditions; those are checked here.
   * See the schedule_result_t definition.
*
* @return a Scrub::attempt_t detailing either a success, or the failure
* reason.
*/
virtual schedule_result_t initiate_a_scrub(
spg_t pgid,
bool allow_requested_repair_only) = 0;
virtual ~ScrubSchedListener() {}
};
} // namespace Scrub
/**
* the queue of PGs waiting to be scrubbed.
* Main operations are scheduling/unscheduling a PG to be scrubbed at a certain
* time.
*
* A "penalty" queue maintains those PGs that have failed to reserve the
* resources of their replicas. The PGs in this list will be reinstated into the
 * scrub queue when all eligible PGs have already been handled, or after a timeout
* (or if their deadline has passed [[disabled at this time]]).
*/
class ScrubQueue {
public:
enum class must_scrub_t { not_mandatory, mandatory };
enum class qu_state_t {
not_registered, // not a primary, thus not considered for scrubbing by this
// OSD (also the temporary state when just created)
registered, // in either of the two queues ('to_scrub' or 'penalized')
unregistering // in the process of being unregistered. Will be finalized
// under lock
};
ScrubQueue(CephContext* cct, Scrub::ScrubSchedListener& osds);
virtual ~ScrubQueue() = default;
struct scrub_schedule_t {
utime_t scheduled_at{};
utime_t deadline{0, 0};
};
struct sched_params_t {
utime_t proposed_time{};
double min_interval{0.0};
double max_interval{0.0};
must_scrub_t is_must{ScrubQueue::must_scrub_t::not_mandatory};
};
struct ScrubJob final : public RefCountedObject {
/**
* a time scheduled for scrub, and a deadline: The scrub could be delayed
* if system load is too high (but not if after the deadline),or if trying
* to scrub out of scrub hours.
*/
scrub_schedule_t schedule;
/// pg to be scrubbed
const spg_t pgid;
/// the OSD id (for the log)
const int whoami;
ceph::atomic<qu_state_t> state{qu_state_t::not_registered};
/**
* the old 'is_registered'. Set whenever the job is registered with the OSD,
* i.e. is in either the 'to_scrub' or the 'penalized' vectors.
*/
std::atomic_bool in_queues{false};
/// last scrub attempt failed to secure replica resources
bool resources_failure{false};
/**
* 'updated' is a temporary flag, used to create a barrier after
* 'sched_time' and 'deadline' (or any other job entry) were modified by
     * a different task.
* 'updated' also signals the need to move a job back from the penalized
* queue to the regular one.
*/
std::atomic_bool updated{false};
/**
* the scrubber is waiting for locked objects to be unlocked.
* Set after a grace period has passed.
*/
bool blocked{false};
utime_t blocked_since{};
utime_t penalty_timeout{0, 0};
CephContext* cct;
ScrubJob(CephContext* cct, const spg_t& pg, int node_id);
utime_t get_sched_time() const { return schedule.scheduled_at; }
/**
* relatively low-cost(*) access to the scrub job's state, to be used in
* logging.
* (*) not a low-cost access on x64 architecture
*/
std::string_view state_desc() const
{
return ScrubQueue::qu_state_text(state.load(std::memory_order_relaxed));
}
void update_schedule(const ScrubQueue::scrub_schedule_t& adjusted);
void dump(ceph::Formatter* f) const;
/*
* as the atomic 'in_queues' appears in many log prints, accessing it for
     * display-only should be made less expensive (relevant on ARM; on x86 a
     * relaxed load produces the same code as a seq_cst one)
*/
std::string_view registration_state() const
{
return in_queues.load(std::memory_order_relaxed) ? "in-queue"
: "not-queued";
}
/**
* access the 'state' directly, for when a distinction between 'registered'
* and 'unregistering' is needed (both have in_queues() == true)
*/
bool is_state_registered() const { return state == qu_state_t::registered; }
/**
* a text description of the "scheduling intentions" of this PG:
* are we already scheduled for a scrub/deep scrub? when?
*/
std::string scheduling_state(utime_t now_is, bool is_deep_expected) const;
friend std::ostream& operator<<(std::ostream& out, const ScrubJob& pg);
};
friend class TestOSDScrub;
friend class ScrubSchedTestWrapper; ///< unit-tests structure
using ScrubJobRef = ceph::ref_t<ScrubJob>;
using ScrubQContainer = std::vector<ScrubJobRef>;
static std::string_view qu_state_text(qu_state_t st);
/**
* called periodically by the OSD to select the first scrub-eligible PG
* and scrub it.
*
* Selection is affected by:
* - time of day: scheduled scrubbing might be configured to only happen
* during certain hours;
* - same for days of the week, and for the system load;
*
* @param preconds: what types of scrub are allowed, given system status &
* config. Some of the preconditions are calculated here.
   * @return Scrub::schedule_result_t::scrub_initiated if a scrub session was
   * successfully initiated. Otherwise - the failure cause.
*
* locking: locks jobs_lock
*/
Scrub::schedule_result_t select_pg_and_scrub(Scrub::ScrubPreconds& preconds);
/**
   * Translate schedule_result_t values into readable text
*/
static std::string_view attempt_res_text(Scrub::schedule_result_t v);
/**
   * remove the pg from the set of PGs to be scanned for scrubbing.
* To be used if we are no longer the PG's primary, or if the PG is removed.
*/
void remove_from_osd_queue(ScrubJobRef sjob);
/**
* @return the list (not std::set!) of all scrub jobs registered
* (apart from PGs in the process of being removed)
*/
ScrubQContainer list_registered_jobs() const;
/**
* Add the scrub job to the list of jobs (i.e. list of PGs) to be periodically
* scrubbed by the OSD.
* The registration is active as long as the PG exists and the OSD is its
* primary.
*
* See update_job() for the handling of the 'suggested' parameter.
*
* locking: might lock jobs_lock
*/
void register_with_osd(ScrubJobRef sjob, const sched_params_t& suggested);
/**
* modify a scrub-job's scheduled time and deadline
*
* There are 3 argument combinations to consider:
* - 'must' is asserted, and the suggested time is 'scrub_must_stamp':
* the registration will be with "beginning of time" target, making the
   *   scrub-job eligible for immediate scrub (given that external conditions
* do not prevent scrubbing)
*
* - 'must' is asserted, and the suggested time is 'now':
* This happens if our stats are unknown. The results are similar to the
* previous scenario.
*
* - not a 'must': we take the suggested time as a basis, and add to it some
* configuration / random delays.
*
* ('must' is sched_params_t.is_must)
*
* locking: not using the jobs_lock
*/
void update_job(ScrubJobRef sjob, const sched_params_t& suggested);
sched_params_t determine_scrub_time(const requested_scrub_t& request_flags,
const pg_info_t& pg_info,
const pool_opts_t& pool_conf) const;
public:
void dump_scrubs(ceph::Formatter* f) const;
/**
   * No new scrub session will start while a scrub has been initiated on a PG
   * and that PG is trying to acquire replica resources.
*/
void set_reserving_now() { a_pg_is_reserving = true; }
void clear_reserving_now() { a_pg_is_reserving = false; }
bool is_reserving_now() const { return a_pg_is_reserving; }
bool can_inc_scrubs() const;
bool inc_scrubs_local();
void dec_scrubs_local();
bool inc_scrubs_remote();
void dec_scrubs_remote();
void dump_scrub_reservations(ceph::Formatter* f) const;
/// counting the number of PGs stuck while scrubbing, waiting for objects
void mark_pg_scrub_blocked(spg_t blocked_pg);
void clear_pg_scrub_blocked(spg_t blocked_pg);
int get_blocked_pgs_count() const;
/**
* scrub_sleep_time
*
* Returns std::chrono::milliseconds indicating how long to wait between
* chunks.
*
   * Implementation Note: Returned value will be either osd_scrub_sleep or
* osd_scrub_extended_sleep depending on must_scrub_param and time
* of day (see configs osd_scrub_begin*)
*/
std::chrono::milliseconds scrub_sleep_time(bool must_scrub) const;
/**
* called every heartbeat to update the "daily" load average
*
* @returns a load value for the logger
*/
[[nodiscard]] std::optional<double> update_load_average();
private:
CephContext* cct;
Scrub::ScrubSchedListener& osd_service;
#ifdef WITH_SEASTAR
auto& conf() const { return local_conf(); }
#else
auto& conf() const { return cct->_conf; }
#endif
/**
* jobs_lock protects the job containers and the relevant scrub-jobs state
* variables. Specifically, the following are guaranteed:
* - 'in_queues' is asserted only if the job is in one of the queues;
* - a job will only be in state 'registered' if in one of the queues;
* - no job will be in the two queues simultaneously;
*
* Note that PG locks should not be acquired while holding jobs_lock.
*/
mutable ceph::mutex jobs_lock = ceph::make_mutex("ScrubQueue::jobs_lock");
ScrubQContainer to_scrub; ///< scrub jobs (i.e. PGs) to scrub
ScrubQContainer penalized; ///< those that failed to reserve remote resources
bool restore_penalized{false};
double daily_loadavg{0.0};
static inline constexpr auto registered_job = [](const auto& jobref) -> bool {
return jobref->state == qu_state_t::registered;
};
static inline constexpr auto invalid_state = [](const auto& jobref) -> bool {
return jobref->state == qu_state_t::not_registered;
};
/**
* Are there scrub jobs that should be reinstated?
*/
void scan_penalized(bool forgive_all, utime_t time_now);
/**
* clear dead entries (unregistered, or belonging to removed PGs) from a
* queue. Job state is changed to match new status.
*/
void rm_unregistered_jobs(ScrubQContainer& group);
/**
* the set of all scrub jobs in 'group' which are ready to be scrubbed
* (ready = their scheduled time has passed).
* The scrub jobs in the new collection are sorted according to
* their scheduled time.
*
* Note that the returned container holds independent refs to the
* scrub jobs.
*/
ScrubQContainer collect_ripe_jobs(ScrubQContainer& group, utime_t time_now);
/// scrub resources management lock (guarding scrubs_local & scrubs_remote)
mutable ceph::mutex resource_lock =
ceph::make_mutex("ScrubQueue::resource_lock");
/// the counters used to manage scrub activity parallelism:
int scrubs_local{0};
int scrubs_remote{0};
/**
* The scrubbing of PGs might be delayed if the scrubbed chunk of objects is
* locked by some other operation. A bug might cause this to be an infinite
   * delay. If that happens, the OSD's "scrub resources" (i.e. the
* counters that limit the number of concurrent scrub operations) might
* be exhausted.
   * We do issue a cluster-log warning on such occasions, but that message is
* easy to miss. The 'some pg is blocked' global flag is used to note the
* existence of such a situation in the scrub-queue log messages.
*/
std::atomic_int_fast16_t blocked_scrubs_cnt{0};
std::atomic_bool a_pg_is_reserving{false};
[[nodiscard]] bool scrub_load_below_threshold() const;
[[nodiscard]] bool scrub_time_permit(utime_t now) const;
/**
* If the scrub job was not explicitly requested, we postpone it by some
* random length of time.
* And if delaying the scrub - we calculate, based on pool parameters, a
* deadline we should scrub before.
*
* @return a pair of values: the determined scrub time, and the deadline
*/
scrub_schedule_t adjust_target_time(
const sched_params_t& recomputed_params) const;
/**
* Look for scrub jobs that have their 'resources_failure' set. These jobs
   * have failed to acquire remote resources the last time we initiated a scrub
* session on them. They are now moved from the 'to_scrub' queue to the
* 'penalized' set.
*
   * locking: called with jobs_lock held
*/
void move_failed_pgs(utime_t now_is);
Scrub::schedule_result_t select_from_group(
ScrubQContainer& group,
const Scrub::ScrubPreconds& preconds,
utime_t now_is);
protected: // used by the unit-tests
/**
* unit-tests will override this function to return a mock time
*/
virtual utime_t time_now() const { return ceph_clock_now(); }
};
template <>
struct fmt::formatter<ScrubQueue::qu_state_t>
: fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const ScrubQueue::qu_state_t& s, FormatContext& ctx)
{
auto out = ctx.out();
out = fmt::formatter<string_view>::format(
std::string{ScrubQueue::qu_state_text(s)}, ctx);
return out;
}
};
template <>
struct fmt::formatter<ScrubQueue::ScrubJob> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const ScrubQueue::ScrubJob& sjob, FormatContext& ctx)
{
return fmt::format_to(
ctx.out(),
"pg[{}] @ {:s} (dl:{:s}) - <{}> / failure: {} / pen. t.o.: {:s} / queue "
"state: {:.7}",
sjob.pgid, sjob.schedule.scheduled_at, sjob.schedule.deadline,
sjob.registration_state(), sjob.resources_failure, sjob.penalty_timeout,
sjob.state.load(std::memory_order_relaxed));
}
};
| 18,331 | 31.677362 | 80 | h |
null | ceph-main/src/osd/scrubber/scrub_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
// clang-format off
/*
+------------------------+
| |
| PgScrubber |
| |-----------------------------+
| | |
+------------------------+ | ownes & uses
| PrimaryLogScrub | |
+------------------------+ |
|
|
v
+-------------------------------------------+
|ScrubBackend |
+----------------+ |============ |
| this_chunk | | |
| (scrub_chunk_t)|<-------| + decode_received_map() |
+----------------+ | + scrub_compare_maps() |
| + scan_snaps() |
| ..... |
| |
| |
+--------------------/-------------\--------+
--/ / \
--/ | |
--/ / \
-/ uses | uses |
uses --/ / \
--/ / |
--/ | \
v v v
PgBackend PG/PrimaryLogPG OSD Services
*/
// clang-format on
#include <fmt/core.h>
#include <fmt/format.h>
#include <string_view>
#include "common/LogClient.h"
#include "osd/OSDMap.h"
#include "osd/osd_types_fmt.h"
#include "osd/scrubber_common.h"
#include "osd/SnapMapReaderI.h"
struct ScrubMap;
class PG;
class PgScrubber;
struct PGPool;
using Scrub::PgScrubBeListener;
using data_omap_digests_t =
std::pair<std::optional<uint32_t>, std::optional<uint32_t>>;
/// a list of fixes to be performed on objects' digests
using digests_fixes_t = std::vector<std::pair<hobject_t, data_omap_digests_t>>;
using shard_info_map_t = std::map<pg_shard_t, shard_info_wrapper>;
using shard_to_scrubmap_t = std::map<pg_shard_t, ScrubMap>;
using auth_peers_t = std::vector<std::pair<ScrubMap::object, pg_shard_t>>;
using wrapped_err_t =
std::variant<inconsistent_obj_wrapper, inconsistent_snapset_wrapper>;
using inconsistent_objs_t = std::vector<wrapped_err_t>;
/// omap-specific stats
struct omap_stat_t {
int large_omap_objects{0};
int64_t omap_bytes{0};
int64_t omap_keys{0};
};
struct error_counters_t {
int shallow_errors{0};
int deep_errors{0};
};
// the PgScrubber services used by the backend
struct ScrubBeListener {
virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
virtual CephContext* get_pg_cct() const = 0;
virtual LoggerSinkSet& get_logger() const = 0;
virtual bool is_primary() const = 0;
virtual spg_t get_pgid() const = 0;
virtual const OSDMapRef& get_osdmap() const = 0;
virtual void add_to_stats(const object_stat_sum_t& stat) = 0;
virtual void submit_digest_fixes(const digests_fixes_t& fixes) = 0;
virtual ~ScrubBeListener() = default;
};
// As the main scrub-backend entry point - scrub_compare_maps() - must
// be able to return both a list of snap fixes and a list of inconsistent
// objects:
struct objs_fix_list_t {
inconsistent_objs_t inconsistent_objs;
std::vector<Scrub::snap_mapper_fix_t> snap_fix_list;
};
/**
* A structure used internally by select_auth_object()
*
* Conveys the usability of a specific shard as an auth source.
*/
struct shard_as_auth_t {
// note: 'not_found' differs from 'not_usable' in that 'not_found'
// does not carry an error message to be cluster-logged.
enum class usable_t : uint8_t { not_usable, not_found, usable };
// the ctor used when the shard should not be considered as auth
explicit shard_as_auth_t(std::string err_msg)
: possible_auth{usable_t::not_usable}
, error_text{err_msg}
, oi{}
, auth_iter{}
, digest{std::nullopt}
{}
// the object cannot be found on the shard
explicit shard_as_auth_t()
: possible_auth{usable_t::not_found}
, error_text{}
, oi{}
, auth_iter{}
, digest{std::nullopt}
{}
shard_as_auth_t(std::string err_msg, std::optional<uint32_t> data_digest)
: possible_auth{usable_t::not_usable}
, error_text{err_msg}
, oi{}
, auth_iter{}
, digest{data_digest}
{}
// possible auth candidate
shard_as_auth_t(const object_info_t& anoi,
shard_to_scrubmap_t::iterator it,
std::string err_msg,
std::optional<uint32_t> data_digest)
: possible_auth{usable_t::usable}
, error_text{err_msg}
, oi{anoi}
, auth_iter{it}
, digest{data_digest}
{}
usable_t possible_auth;
std::string error_text;
object_info_t oi;
shard_to_scrubmap_t::iterator auth_iter;
std::optional<uint32_t> digest;
// when used for Crimson, we'll probably want to return 'digest_match' (and
// other in/out arguments) via this struct
};
// the format specifier {D} is used to request debug output
template <>
struct fmt::formatter<shard_as_auth_t> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
auto it = ctx.begin();
if (it != ctx.end()) {
debug_log = (*it++) == 'D';
}
return it;
}
template <typename FormatContext>
auto format(shard_as_auth_t const& as_auth, FormatContext& ctx)
{
if (debug_log) {
      // note: an 'if' chain, as it is hard to consistently (on all compilers)
      // avoid warnings for a switch with multiple return paths
if (as_auth.possible_auth == shard_as_auth_t::usable_t::not_usable) {
return fmt::format_to(ctx.out(),
"{{shard-not-usable:{}}}",
as_auth.error_text);
}
if (as_auth.possible_auth == shard_as_auth_t::usable_t::not_found) {
return fmt::format_to(ctx.out(), "{{shard-not-found}}");
}
return fmt::format_to(ctx.out(),
"{{shard-usable: soid:{} {{txt:{}}} }}",
as_auth.oi.soid,
as_auth.error_text);
} else {
return fmt::format_to(
ctx.out(),
"usable:{} soid:{} {{txt:{}}}",
(as_auth.possible_auth == shard_as_auth_t::usable_t::usable) ? "yes"
: "no",
as_auth.oi.soid,
as_auth.error_text);
}
}
bool debug_log{false};
};
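// Illustrative usage (not part of the original header): given a
// shard_as_auth_t 'a', fmt::format("{:D}", a) selects the verbose debug form
// handled above, while fmt::format("{}", a) produces the terse form.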
struct auth_selection_t {
shard_to_scrubmap_t::iterator auth; ///< an iter into one of this_chunk->maps
pg_shard_t auth_shard; // set to auth->first
object_info_t auth_oi;
shard_info_map_t shard_map;
bool is_auth_available{false}; ///< managed to select an auth' source?
bool digest_match{true}; ///< do all (existing) digests match?
};
// note: some scrub tests are sensitive to the specific format of
// auth_selection_t listing in the logs
template <>
struct fmt::formatter<auth_selection_t> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
return ctx.begin();
}
template <typename FormatContext>
auto format(auth_selection_t const& aus, FormatContext& ctx)
{
return fmt::format_to(ctx.out(),
" {{AU-S: {}->{:x} OI({:x}:{}) {} dm:{}}} ",
aus.auth->first,
(uint64_t)(&aus.auth->second),
(uint64_t)(&aus.auth_oi),
aus.auth_oi,
aus.shard_map.size(),
aus.digest_match);
}
};
/**
* the back-end data that is per-chunk
*
* Created by the Scrubber after all replicas' maps have arrived.
*/
struct scrub_chunk_t {
explicit scrub_chunk_t(pg_shard_t i_am) { received_maps[i_am] = ScrubMap{}; }
/// the working set of scrub maps: the received maps, plus
/// Primary's own map.
std::map<pg_shard_t, ScrubMap> received_maps;
/// a collection of all objs mentioned in the maps
std::set<hobject_t> authoritative_set;
utime_t started{ceph_clock_now()};
digests_fixes_t missing_digest;
/// Map from object with errors to good peers
std::map<hobject_t, std::list<pg_shard_t>> authoritative;
inconsistent_objs_t m_inconsistent_objs;
/// shallow/deep error counters
error_counters_t m_error_counts;
// these must be reset for each element:
std::set<pg_shard_t> cur_missing;
std::set<pg_shard_t> cur_inconsistent;
bool fix_digest{false};
};
/**
* ScrubBackend wraps the data and operations required for the back-end part of
* the scrubbing (i.e. for comparing the maps and fixing objects).
*
* Created anew upon each initiation of a scrub session.
*/
class ScrubBackend {
public:
// Primary constructor
ScrubBackend(ScrubBeListener& scrubber,
PgScrubBeListener& pg,
pg_shard_t i_am,
bool repair,
scrub_level_t shallow_or_deep,
const std::set<pg_shard_t>& acting);
// Replica constructor: no primary map
ScrubBackend(ScrubBeListener& scrubber,
PgScrubBeListener& pg,
pg_shard_t i_am,
bool repair,
scrub_level_t shallow_or_deep);
friend class PgScrubber;
friend class TestScrubBackend;
/**
* reset the per-chunk data structure (scrub_chunk_t).
* Create an empty scrub-map for this shard, and place it
* in the appropriate entry in 'received_maps'.
*
   * The newly created ScrubMap is accessible via get_primary_scrubmap().
*/
void new_chunk();
ScrubMap& get_primary_scrubmap();
/**
* sets Backend's m_repair flag (setting m_mode_desc to a corresponding
* string)
*/
void update_repair_status(bool should_repair);
std::vector<Scrub::snap_mapper_fix_t> replica_clean_meta(
ScrubMap& smap,
bool max_reached,
const hobject_t& start,
Scrub::SnapMapReaderI& snaps_getter);
/**
* decode the arriving MOSDRepScrubMap message, placing the replica's
* scrub-map into received_maps[from].
*
* @param from replica
*/
void decode_received_map(pg_shard_t from, const MOSDRepScrubMap& msg);
objs_fix_list_t scrub_compare_maps(bool max_reached,
Scrub::SnapMapReaderI& snaps_getter);
int scrub_process_inconsistent();
const omap_stat_t& this_scrub_omapstats() const { return m_omap_stats; }
int authoritative_peers_count() const { return m_auth_peers.size(); };
std::ostream& logger_prefix(std::ostream* _dout, const ScrubBackend* t);
private:
// set/constructed at the ctor():
ScrubBeListener& m_scrubber;
Scrub::PgScrubBeListener& m_pg;
const pg_shard_t m_pg_whoami;
bool m_repair;
const scrub_level_t m_depth;
const spg_t m_pg_id;
std::vector<pg_shard_t> m_acting_but_me; // primary only
bool m_is_replicated{true};
std::string_view m_mode_desc;
std::string m_formatted_id;
const PGPool& m_pool;
bool m_incomplete_clones_allowed{false};
/// collecting some scrub-session-wide omap stats
omap_stat_t m_omap_stats;
/// Mapping from object with errors to good peers
std::map<hobject_t, auth_peers_t> m_auth_peers;
// shorthands:
ConfigProxy& m_conf;
LoggerSinkSet& clog;
private:
struct auth_and_obj_errs_t {
std::list<pg_shard_t> auth_list;
std::set<pg_shard_t> object_errors;
};
std::optional<scrub_chunk_t> this_chunk;
/// Maps from objects with errors to missing peers
HobjToShardSetMapping m_missing; // used by scrub_process_inconsistent()
/// Maps from objects with errors to inconsistent peers
HobjToShardSetMapping m_inconsistent; // used by scrub_process_inconsistent()
  /// Cleaned ScrubMap pending snap metadata scrub
ScrubMap m_cleaned_meta_map{};
/// a reference to the primary map
ScrubMap& my_map();
/// shallow/deep error counters
error_counters_t get_error_counts() const { return this_chunk->m_error_counts; }
/**
* merge_to_authoritative_set() updates
   * - this_chunk->received_maps[from] with the replicas' scrub-maps;
* - this_chunk->authoritative_set as a union of all the maps' objects;
*/
void merge_to_authoritative_set();
// note: used by both Primary & replicas
static ScrubMap clean_meta_map(ScrubMap& cleaned, bool max_reached);
void compare_smaps();
/// might return error messages to be cluster-logged
std::optional<std::string> compare_obj_in_maps(const hobject_t& ho);
void omap_checks();
std::optional<auth_and_obj_errs_t> for_empty_auth_list(
std::list<pg_shard_t>&& auths,
std::set<pg_shard_t>&& obj_errors,
shard_to_scrubmap_t::iterator auth,
const hobject_t& ho,
std::stringstream& errstream);
auth_and_obj_errs_t match_in_shards(const hobject_t& ho,
auth_selection_t& auth_sel,
inconsistent_obj_wrapper& obj_result,
std::stringstream& errstream);
// returns: true if a discrepancy was found
bool compare_obj_details(pg_shard_t auth_shard,
const ScrubMap::object& auth,
const object_info_t& auth_oi,
const ScrubMap::object& candidate,
shard_info_wrapper& shard_result,
inconsistent_obj_wrapper& obj_result,
std::stringstream& errorstream,
bool has_snapset);
void repair_object(const hobject_t& soid,
const auth_peers_t& ok_peers,
const std::set<pg_shard_t>& bad_peers);
/**
* An auxiliary used by select_auth_object() to test a specific shard
* as a possible auth candidate.
* @param ho the hobject for which we are looking for an auth source
* @param srd the candidate shard
* @param shard_map [out] a collection of shard_info-s per shard.
* possible_auth_shard() might set error flags in the relevant (this shard's)
* entry.
*/
shard_as_auth_t possible_auth_shard(const hobject_t& ho,
const pg_shard_t& srd,
shard_info_map_t& shard_map);
auth_selection_t select_auth_object(const hobject_t& ho,
std::stringstream& errstream);
enum class digest_fixing_t { no, if_aged, force };
/*
* an aux used by inconsistents() to determine whether to fix the digest
*/
[[nodiscard]] digest_fixing_t should_fix_digest(
const hobject_t& ho,
const ScrubMap::object& auth_object,
const object_info_t& auth_oi,
bool repair_flag,
std::stringstream& errstream);
void inconsistents(const hobject_t& ho,
ScrubMap::object& auth_object,
object_info_t& auth_oi, // consider moving to object
auth_and_obj_errs_t&& auth_n_errs,
std::stringstream& errstream);
int process_clones_to(const std::optional<hobject_t>& head,
const std::optional<SnapSet>& snapset,
std::optional<snapid_t> target,
std::vector<snapid_t>::reverse_iterator* curclone,
inconsistent_snapset_wrapper& e);
/**
* Validate consistency of the object info and snap sets.
*/
void scrub_snapshot_metadata(ScrubMap& map);
/**
* Updates the "global" (i.e. - not 'per-chunk') databases:
* - in m_authoritative: a list of good peers for each "problem" object in
* the current chunk;
* - in m_cleaned_meta_map: a "cleaned" version of the object (the one from
* the selected shard).
*/
void update_authoritative();
void log_missing(int missing,
const std::optional<hobject_t>& head,
const char* logged_func_name);
/**
* returns a list of snaps "fix orders"
*/
std::vector<Scrub::snap_mapper_fix_t> scan_snaps(
ScrubMap& smap,
Scrub::SnapMapReaderI& snaps_getter);
/**
* an aux used by scan_snaps(), possibly returning a fix-order
* for a specific hobject.
*/
std::optional<Scrub::snap_mapper_fix_t> scan_object_snaps(
const hobject_t& hoid,
const SnapSet& snapset,
Scrub::SnapMapReaderI& snaps_getter);
// accessing the PG backend for this translation service
uint64_t logical_to_ondisk_size(uint64_t logical_size) const;
};
template <>
struct fmt::formatter<data_omap_digests_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const data_omap_digests_t& dg, FormatContext& ctx)
{
// can't use value_or() due to different output types
if (std::get<0>(dg).has_value()) {
fmt::format_to(ctx.out(), "[{:#x}/", std::get<0>(dg).value());
} else {
fmt::format_to(ctx.out(), "[---/");
}
if (std::get<1>(dg).has_value()) {
return fmt::format_to(ctx.out(), "{:#x}]", std::get<1>(dg).value());
} else {
return fmt::format_to(ctx.out(), "---]");
}
}
};
template <>
struct fmt::formatter<std::pair<hobject_t, data_omap_digests_t>> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const std::pair<hobject_t, data_omap_digests_t>& x,
FormatContext& ctx) const
{
return fmt::format_to(ctx.out(),
"{{ {} - {} }}",
std::get<0>(x),
std::get<1>(x));
}
};
| 18,408 | 32.169369 | 82 | h |
null | ceph-main/src/osd/scrubber/scrub_machine.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <boost/statechart/custom_reaction.hpp>
#include <boost/statechart/deferral.hpp>
#include <boost/statechart/event.hpp>
#include <boost/statechart/event_base.hpp>
#include <boost/statechart/in_state_reaction.hpp>
#include <boost/statechart/simple_state.hpp>
#include <boost/statechart/state.hpp>
#include <boost/statechart/state_machine.hpp>
#include <boost/statechart/transition.hpp>
#include "common/version.h"
#include "include/Context.h"
#include "osd/scrubber_common.h"
#include "scrub_machine_lstnr.h"
/// a wrapper that sets the FSM state description used by the
/// PgScrubber
/// \todo consider using the full NamedState as in Peering
struct NamedSimply {
explicit NamedSimply(ScrubMachineListener* scrubber, const char* name);
};
class PG; // holding a pointer to that one - just for testing
class PgScrubber;
namespace Scrub {
namespace sc = ::boost::statechart;
namespace mpl = ::boost::mpl;
//
// EVENTS
//
void on_event_creation(std::string_view nm);
void on_event_discard(std::string_view nm);
#define MEV(E) \
struct E : sc::event<E> { \
inline static int actv{0}; \
E() \
{ \
if (!actv++) \
on_event_creation(#E); \
} \
~E() \
{ \
if (!--actv) \
on_event_discard(#E); \
} \
void print(std::ostream* out) const { *out << #E; } \
std::string_view print() const { return #E; } \
};
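// Illustrative note (not in the original source): MEV(RemotesReserved)
// expands to
//   struct RemotesReserved : sc::event<RemotesReserved> { ... };
// with the creation/discard accounting and the two print() helpers shown in
// the macro above.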
/// all replicas have granted our reserve request
MEV(RemotesReserved)
/// a reservation request has failed
MEV(ReservationFailure)
/// reservations have timed out
MEV(ReservationTimeout)
/// initiate a new scrubbing session (relevant if we are a Primary)
MEV(StartScrub)
/// initiate a new scrubbing session. Only triggered at Recovery completion
MEV(AfterRepairScrub)
/// triggered when the PG unblocked an object that was marked for scrubbing.
/// Via the PGScrubUnblocked op
MEV(Unblocked)
MEV(InternalSchedScrub)
MEV(RangeBlockedAlarm)
MEV(SleepComplete)
MEV(SelectedChunkFree)
MEV(ChunkIsBusy)
/// Update to active_pushes. 'active_pushes' represents recovery that
/// is in-flight to the local ObjectStore
MEV(ActivePushesUpd)
/// (Primary only) all updates are committed
MEV(UpdatesApplied)
/// the internal counterpart of UpdatesApplied
MEV(InternalAllUpdates)
/// got a map from a replica
MEV(GotReplicas)
/// internal - BuildMap preempted. Required, as detected within the ctor
MEV(IntBmPreempted)
MEV(InternalError)
MEV(IntLocalMapDone)
/// external. called upon success of a MODIFY op. See
/// scrub_snapshot_metadata()
MEV(DigestUpdate)
/// event emitted when the replica grants a reservation to the primary
MEV(ReplicaGrantReservation)
/// initiating replica scrub
MEV(StartReplica)
/// 'start replica' when there are no pending updates
MEV(StartReplicaNoWait)
MEV(SchedReplica)
/// Update to active_pushes. 'active_pushes' represents recovery
/// that is in-flight to the local ObjectStore
MEV(ReplicaPushesUpd)
/// guarantee that the FSM is in the quiescent state (i.e. NotActive)
MEV(FullReset)
/// finished handling this chunk. Go get the next one
MEV(NextChunk)
/// all chunks handled
MEV(ScrubFinished)
//
// STATES
//
struct NotActive; ///< the quiescent state. No active scrubbing.
struct ReservingReplicas; ///< securing scrub resources from replicas' OSDs
struct ActiveScrubbing; ///< the active state for a Primary. A sub-machine.
struct ReplicaIdle; ///< Initial reserved replica state
struct ReplicaBuildingMap; ///< an active state for a replica.
class ScrubMachine : public sc::state_machine<ScrubMachine, NotActive> {
public:
friend class PgScrubber;
public:
explicit ScrubMachine(PG* pg, ScrubMachineListener* pg_scrub);
~ScrubMachine();
spg_t m_pg_id;
ScrubMachineListener* m_scrbr;
std::ostream& gen_prefix(std::ostream& out) const;
void assert_not_active() const;
[[nodiscard]] bool is_reserving() const;
[[nodiscard]] bool is_accepting_updates() const;
private:
/**
* scheduled_event_state_t
*
* Heap allocated, ref-counted state shared between scheduled event callback
* and timer_event_token_t. Ensures that callback and timer_event_token_t
   * can be safely destroyed in either order while still allowing for
* cancellation.
*/
struct scheduled_event_state_t {
bool canceled = false;
ScrubMachineListener::scrubber_callback_cancel_token_t cb_token = nullptr;
operator bool() const {
return nullptr != cb_token;
}
~scheduled_event_state_t() {
/* For the moment, this assert encodes an assumption that we always
* retain the token until the event either fires or is canceled.
       * If a user needs/wants to relax that requirement, this assert can
* be removed */
assert(!cb_token);
}
};
public:
/**
* timer_event_token_t
*
* Represents in-flight timer event. Destroying the object or invoking
* release() directly will cancel the in-flight timer event preventing it
* from being delivered. The intended usage is to invoke
* schedule_timer_event_after in the constructor of the state machine state
* intended to handle the event and assign the returned timer_event_token_t
   * to a member of that state. That way, exiting the state will implicitly
   * cancel the event. See RangeBlocked::m_timeout_token and
* RangeBlockedAlarm for an example usage.
*/
class timer_event_token_t {
friend ScrubMachine;
// invariant: (bool)parent == (bool)event_state
ScrubMachine *parent = nullptr;
std::shared_ptr<scheduled_event_state_t> event_state;
timer_event_token_t(
ScrubMachine *parent,
std::shared_ptr<scheduled_event_state_t> event_state)
: parent(parent), event_state(event_state) {
assert(*this);
}
void swap(timer_event_token_t &rhs) {
std::swap(parent, rhs.parent);
std::swap(event_state, rhs.event_state);
}
public:
timer_event_token_t() = default;
timer_event_token_t(timer_event_token_t &&rhs) {
swap(rhs);
assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
}
timer_event_token_t &operator=(timer_event_token_t &&rhs) {
swap(rhs);
assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
return *this;
}
operator bool() const {
assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
return parent;
}
void release() {
if (*this) {
if (*event_state) {
parent->m_scrbr->cancel_callback(event_state->cb_token);
event_state->canceled = true;
event_state->cb_token = nullptr;
}
event_state.reset();
parent = nullptr;
}
}
~timer_event_token_t() {
release();
}
};
/**
* schedule_timer_event_after
*
* Schedules event EventT{Args...} to be delivered duration in the future.
   * The implementation implicitly drops the event on interval change. The
* returned timer_event_token_t can be used to cancel the event prior to
* its delivery -- it should generally be embedded as a member in the state
* intended to handle the event. See the comment on timer_event_token_t
* for further information.
*/
template <typename EventT, typename... Args>
timer_event_token_t schedule_timer_event_after(
ceph::timespan duration, Args&&... args) {
auto token = std::make_shared<scheduled_event_state_t>();
token->cb_token = m_scrbr->schedule_callback_after(
duration,
[this, token, event=EventT(std::forward<Args>(args)...)] {
if (!token->canceled) {
token->cb_token = nullptr;
process_event(std::move(event));
} else {
assert(nullptr == token->cb_token);
}
}
);
return timer_event_token_t{this, token};
}
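  /*
   * Usage sketch (illustrative only; follows the pattern described for
   * timer_event_token_t above, with 'grace' standing in for an assumed
   * ceph::timespan value):
   *
   *   // in the constructor of the state that handles the event:
   *   m_timeout_token =
   *     context<ScrubMachine>().schedule_timer_event_after<RangeBlockedAlarm>(
   *       grace);
   *   // leaving the state destroys the token, cancelling the pending event;
   *   // m_timeout_token.release() cancels it explicitly.
   */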
};
/**
* The Scrubber's base (quiescent) state.
* Scrubbing is triggered by one of the following events:
*
 * - (standard scenario for a Primary): 'StartScrub'. Initiates the OSD's
* resources reservation process. Will be issued by PG::scrub(), following a
* queued "PGScrub" op.
*
* - a special end-of-recovery Primary scrub event ('AfterRepairScrub').
*
* - (for a replica) 'StartReplica' or 'StartReplicaNoWait', triggered by
* an incoming MOSDRepScrub message.
*
* note (20.8.21): originally, AfterRepairScrub was triggering a scrub without
* waiting for replica resources to be acquired. But once replicas started
* using the resource-request to identify and tag the scrub session, this
* bypass cannot be supported anymore.
*/
struct NotActive : sc::state<NotActive, ScrubMachine>, NamedSimply {
explicit NotActive(my_context ctx);
using reactions =
mpl::list<sc::custom_reaction<StartScrub>,
// a scrubbing that was initiated at recovery completion:
sc::custom_reaction<AfterRepairScrub>,
sc::transition<ReplicaGrantReservation, ReplicaIdle>>;
sc::result react(const StartScrub&);
sc::result react(const AfterRepairScrub&);
};
struct ReservingReplicas : sc::state<ReservingReplicas, ScrubMachine>,
NamedSimply {
explicit ReservingReplicas(my_context ctx);
~ReservingReplicas();
using reactions = mpl::list<sc::custom_reaction<FullReset>,
// all replicas granted our resources request
sc::transition<RemotesReserved, ActiveScrubbing>,
sc::custom_reaction<ReservationTimeout>,
sc::custom_reaction<ReservationFailure>>;
ceph::coarse_real_clock::time_point entered_at =
ceph::coarse_real_clock::now();
ScrubMachine::timer_event_token_t m_timeout_token;
sc::result react(const FullReset&);
sc::result react(const ReservationTimeout&);
/// at least one replica denied us the scrub resources we've requested
sc::result react(const ReservationFailure&);
};
// the "active" sub-states
/// the objects range is blocked
struct RangeBlocked;
/// either delaying the scrub by some time and requeuing, or just requeue
struct PendingTimer;
/// select a chunk to scrub, and verify its availability
struct NewChunk;
struct WaitPushes;
struct WaitLastUpdate;
struct BuildMap;
/// a problem during BuildMap. Wait for all replicas to report, then restart.
struct DrainReplMaps;
/// wait for all replicas to report
struct WaitReplicas;
struct WaitDigestUpdate;
struct ActiveScrubbing
: sc::state<ActiveScrubbing, ScrubMachine, PendingTimer>, NamedSimply {
explicit ActiveScrubbing(my_context ctx);
~ActiveScrubbing();
using reactions = mpl::list<sc::custom_reaction<InternalError>,
sc::custom_reaction<FullReset>>;
sc::result react(const FullReset&);
sc::result react(const InternalError&);
};
struct RangeBlocked : sc::state<RangeBlocked, ActiveScrubbing>, NamedSimply {
explicit RangeBlocked(my_context ctx);
using reactions = mpl::list<
sc::custom_reaction<RangeBlockedAlarm>,
sc::transition<Unblocked, PendingTimer>>;
ceph::coarse_real_clock::time_point entered_at =
ceph::coarse_real_clock::now();
ScrubMachine::timer_event_token_t m_timeout_token;
sc::result react(const RangeBlockedAlarm &);
};
/**
* PendingTimer
*
 * Represents the period between chunks. Waits for get_scrub_sleep_time()
 * (if non-zero) by scheduling a SleepComplete event, and then queues an
 * InternalSchedScrub to start the next chunk.
*/
struct PendingTimer : sc::state<PendingTimer, ActiveScrubbing>, NamedSimply {
explicit PendingTimer(my_context ctx);
using reactions = mpl::list<
sc::transition<InternalSchedScrub, NewChunk>,
sc::custom_reaction<SleepComplete>>;
ceph::coarse_real_clock::time_point entered_at =
ceph::coarse_real_clock::now();
ScrubMachine::timer_event_token_t m_sleep_timer;
sc::result react(const SleepComplete&);
};
struct NewChunk : sc::state<NewChunk, ActiveScrubbing>, NamedSimply {
explicit NewChunk(my_context ctx);
using reactions = mpl::list<sc::transition<ChunkIsBusy, RangeBlocked>,
sc::custom_reaction<SelectedChunkFree>>;
sc::result react(const SelectedChunkFree&);
};
/**
* initiate the update process for this chunk
*
 * Wait for 'active_pushes' to clear.
* 'active_pushes' represents recovery that is in-flight to the local
* Objectstore, hence scrub waits until the correct data is readable
* (in-flight data to the Objectstore is not readable until written to
* disk, termed 'applied' here)
*/
struct WaitPushes : sc::state<WaitPushes, ActiveScrubbing>, NamedSimply {
explicit WaitPushes(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<ActivePushesUpd>>;
sc::result react(const ActivePushesUpd&);
};
struct WaitLastUpdate : sc::state<WaitLastUpdate, ActiveScrubbing>,
NamedSimply {
explicit WaitLastUpdate(my_context ctx);
void on_new_updates(const UpdatesApplied&);
using reactions =
mpl::list<sc::custom_reaction<InternalAllUpdates>,
sc::in_state_reaction<UpdatesApplied,
WaitLastUpdate,
&WaitLastUpdate::on_new_updates>>;
sc::result react(const InternalAllUpdates&);
};
struct BuildMap : sc::state<BuildMap, ActiveScrubbing>, NamedSimply {
explicit BuildMap(my_context ctx);
// possible error scenarios:
// - an error reported by the backend will trigger an 'InternalError' event,
// handled by our parent state;
// - if preempted, we switch to DrainReplMaps, where we will wait for all
// replicas to send their maps before acknowledging the preemption;
// - an interval change will be handled by the relevant 'send-event'
  //    functions, and will be translated into a 'FullReset' event.
using reactions = mpl::list<sc::transition<IntBmPreempted, DrainReplMaps>,
// looping, waiting for the backend to finish:
sc::transition<InternalSchedScrub, BuildMap>,
sc::custom_reaction<IntLocalMapDone>>;
sc::result react(const IntLocalMapDone&);
};
/*
* "drain" scrub-maps responses from replicas
*/
struct DrainReplMaps : sc::state<DrainReplMaps, ActiveScrubbing>, NamedSimply {
explicit DrainReplMaps(my_context ctx);
using reactions =
// all replicas are accounted for:
mpl::list<sc::custom_reaction<GotReplicas>>;
sc::result react(const GotReplicas&);
};
struct WaitReplicas : sc::state<WaitReplicas, ActiveScrubbing>, NamedSimply {
explicit WaitReplicas(my_context ctx);
using reactions = mpl::list<
// all replicas are accounted for:
sc::custom_reaction<GotReplicas>,
sc::custom_reaction<DigestUpdate>>;
sc::result react(const GotReplicas&);
sc::result react(const DigestUpdate&);
bool all_maps_already_called{false}; // see comment in react code
};
struct WaitDigestUpdate : sc::state<WaitDigestUpdate, ActiveScrubbing>,
NamedSimply {
explicit WaitDigestUpdate(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<DigestUpdate>,
sc::custom_reaction<ScrubFinished>,
sc::transition<NextChunk, PendingTimer>>;
sc::result react(const DigestUpdate&);
sc::result react(const ScrubFinished&);
};
// ----------------------------- the "replica active" states
/**
* ReservedReplica
*
 * Parent state for replica states. Controls the lifecycle of
* PgScrubber::m_reservations.
*/
struct ReservedReplica : sc::state<ReservedReplica, ScrubMachine, ReplicaIdle>,
NamedSimply {
explicit ReservedReplica(my_context ctx);
~ReservedReplica();
using reactions = mpl::list<sc::transition<FullReset, NotActive>>;
};
struct ReplicaWaitUpdates;
/**
* ReplicaIdle
*
* Replica is waiting for a map request.
*/
struct ReplicaIdle : sc::state<ReplicaIdle, ReservedReplica>,
NamedSimply {
explicit ReplicaIdle(my_context ctx);
~ReplicaIdle();
using reactions = mpl::list<
sc::transition<StartReplica, ReplicaWaitUpdates>,
sc::transition<StartReplicaNoWait, ReplicaBuildingMap>>;
};
/**
 * ReplicaActiveOp
*
* Lifetime matches handling for a single map request op
*/
struct ReplicaActiveOp
: sc::state<ReplicaActiveOp, ReservedReplica, ReplicaWaitUpdates>,
NamedSimply {
explicit ReplicaActiveOp(my_context ctx);
~ReplicaActiveOp();
};
/*
* Waiting for 'active_pushes' to complete
*
* When in this state:
* - the details of the Primary's request were internalized by PgScrubber;
* - 'active' scrubbing is set
*/
struct ReplicaWaitUpdates : sc::state<ReplicaWaitUpdates, ReservedReplica>,
NamedSimply {
explicit ReplicaWaitUpdates(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<ReplicaPushesUpd>>;
sc::result react(const ReplicaPushesUpd&);
};
struct ReplicaBuildingMap : sc::state<ReplicaBuildingMap, ReservedReplica>
, NamedSimply {
explicit ReplicaBuildingMap(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<SchedReplica>>;
sc::result react(const SchedReplica&);
};
} // namespace Scrub
| 17,326 | 28.977509 | 80 | h |
null | ceph-main/src/osd/scrubber/scrub_machine_lstnr.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file the PgScrubber interface used by the scrub FSM
*/
#include "common/version.h"
#include "include/Context.h"
#include "osd/osd_types.h"
struct ScrubMachineListener;
namespace Scrub {
enum class PreemptionNoted { no_preemption, preempted };
/// the interface exposed by the PgScrubber into its internal
/// preemption_data object
struct preemption_t {
virtual ~preemption_t() = default;
preemption_t() = default;
preemption_t(const preemption_t&) = delete;
preemption_t(preemption_t&&) = delete;
[[nodiscard]] virtual bool is_preemptable() const = 0;
[[nodiscard]] virtual bool was_preempted() const = 0;
virtual void adjust_parameters() = 0;
/**
* Try to preempt the scrub.
* 'true' (i.e. - preempted) if:
* preemptable && not already preempted
*/
virtual bool do_preempt() = 0;
/**
* disables preemptions.
* Returns 'true' if we were already preempted
*/
virtual bool disable_and_test() = 0;
};
} // namespace Scrub
struct ScrubMachineListener {
virtual CephContext *get_cct() const = 0;
virtual LogChannelRef &get_clog() const = 0;
virtual int get_whoami() const = 0;
virtual spg_t get_spgid() const = 0;
using scrubber_callback_t = std::function<void(void)>;
using scrubber_callback_cancel_token_t = Context*;
/**
* schedule_callback_after
*
   * cb will be invoked after at least 'duration' time has elapsed.
* Interface implementation is responsible for maintaining and locking
* a PG reference. cb will be silently discarded if the interval has changed
* between the call to schedule_callback_after and when the pg is locked.
*
* Returns an associated token to be used in cancel_callback below.
*/
virtual scrubber_callback_cancel_token_t schedule_callback_after(
ceph::timespan duration, scrubber_callback_t &&cb) = 0;
/**
* cancel_callback
*
   * Attempts to cancel the callback with which the passed token is associated.
* cancel_callback is best effort, the callback may still fire.
   * cancel_callback guarantees that exactly one of two things will happen:
* - the callback is destroyed and will not be invoked
* - the callback will be invoked
*/
virtual void cancel_callback(scrubber_callback_cancel_token_t) = 0;
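  /*
   * Combined usage sketch (illustrative only; the 5-second duration and the
   * handle_timeout() lambda body are assumptions, not part of this API):
   *
   *   auto token = schedule_callback_after(
   *     std::chrono::seconds(5), [this] { handle_timeout(); });
   *   ...
   *   cancel_callback(token);  // best effort - the callback may still fire
   */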
virtual ceph::timespan get_range_blocked_grace() = 0;
struct MsgAndEpoch {
MessageRef m_msg;
epoch_t m_epoch;
};
virtual ~ScrubMachineListener() = default;
/// set the string we'd use in logs to convey the current state-machine
/// state.
virtual void set_state_name(const char* name) = 0;
[[nodiscard]] virtual bool is_primary() const = 0;
virtual void select_range_n_notify() = 0;
/// walk the log to find the latest update that affects our chunk
virtual eversion_t search_log_for_updates() const = 0;
virtual eversion_t get_last_update_applied() const = 0;
virtual int pending_active_pushes() const = 0;
virtual int build_primary_map_chunk() = 0;
virtual int build_replica_map_chunk() = 0;
virtual void on_init() = 0;
virtual void on_replica_init() = 0;
virtual void replica_handling_done() = 0;
/// the version of 'scrub_clear_state()' that does not try to invoke FSM
/// services (thus can be called from FSM reactions)
virtual void clear_pgscrub_state() = 0;
/// Get time to sleep before next scrub
virtual std::chrono::milliseconds get_scrub_sleep_time() const = 0;
/// Queues InternalSchedScrub for later
virtual void queue_for_scrub_resched(Scrub::scrub_prio_t prio) = 0;
/**
* Ask all replicas for their scrub maps for the current chunk.
*/
virtual void get_replicas_maps(bool replica_can_preempt) = 0;
virtual void on_digest_updates() = 0;
/// the part that actually finalizes a scrub
virtual void scrub_finish() = 0;
/**
* Prepare a MOSDRepScrubMap message carrying the requested scrub map
* @param was_preempted - were we preempted?
* @return the message, and the current value of 'm_replica_min_epoch' (which
* is used when sending the message, but will be overwritten before that).
*/
[[nodiscard]] virtual MsgAndEpoch prep_replica_map_msg(
Scrub::PreemptionNoted was_preempted) = 0;
/**
* Send to the primary the pre-prepared message containing the requested map
*/
virtual void send_replica_map(const MsgAndEpoch& preprepared) = 0;
/**
* Let the primary know that we were preempted while trying to build the
* requested map.
*/
virtual void send_preempted_replica() = 0;
[[nodiscard]] virtual bool has_pg_marked_new_updates() const = 0;
virtual void set_subset_last_update(eversion_t e) = 0;
[[nodiscard]] virtual bool was_epoch_changed() const = 0;
virtual Scrub::preemption_t& get_preemptor() = 0;
/**
* a "technical" collection of the steps performed once all
* rep maps are available:
* - the maps are compared
* - the scrub region markers (start_ & end_) are advanced
* - callbacks and ops that were pending are allowed to run
*/
virtual void maps_compare_n_cleanup() = 0;
/**
* order the PgScrubber to initiate the process of reserving replicas' scrub
* resources.
*/
virtual void reserve_replicas() = 0;
virtual void unreserve_replicas() = 0;
virtual void on_replica_reservation_timeout() = 0;
virtual void set_scrub_begin_time() = 0;
virtual void set_scrub_duration() = 0;
/**
   * No new scrub session will start while a scrub has been initiated on a PG,
   * and that PG is trying to acquire replica resources.
   * set_reserving_now()/clear_reserving_now() lets the OSD scrub-queue know
* we are busy reserving.
*/
virtual void set_reserving_now() = 0;
virtual void clear_reserving_now() = 0;
/**
* Manipulate the 'I am being scrubbed now' Scrubber's flag
*/
virtual void set_queued_or_active() = 0;
virtual void clear_queued_or_active() = 0;
/// Release remote scrub reservation
virtual void dec_scrubs_remote() = 0;
/// Advance replica token
virtual void advance_token() = 0;
/**
* Our scrubbing is blocked, waiting for an excessive length of time for
* our target chunk to be unlocked. We will set the corresponding flags,
   * both in the OSD-wide scrub-queue object, and in our own scrub-job object.
* Both flags are used to report the unhealthy state in the log and in
* response to scrub-queue queries.
*/
virtual void set_scrub_blocked(utime_t since) = 0;
virtual void clear_scrub_blocked() = 0;
/**
* the FSM interface into the "are we waiting for maps, either our own or from
* replicas" state.
* The FSM can only:
* - mark the local map as available, and
* - query status
*/
virtual void mark_local_map_ready() = 0;
[[nodiscard]] virtual bool are_all_maps_available() const = 0;
/// a log/debug interface
virtual std::string dump_awaited_maps() const = 0;
/// exposed to be used by the scrub_machine logger
virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
/// sending cluster-log warnings
virtual void log_cluster_warning(const std::string& msg) const = 0;
};
| 7,198 | 29.121339 | 80 | h |
null | ceph-main/src/osdc/Filer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_FILER_H
#define CEPH_FILER_H
/*** Filer
*
* stripe file ranges onto objects.
* build list<ObjectExtent> for the objecter or objectcacher.
*
* also, provide convenience methods that call objecter for you.
*
* "files" are identified by ino.
*/
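/*
 * Illustrative usage sketch (not part of the upstream documentation; the
 * names 'filer', 'ino', 'layout' and 'onfinish' are placeholders):
 *
 *   bufferlist bl;
 *   filer.read(ino, &layout, CEPH_NOSNAP, 0, 4 << 20, &bl, 0, onfinish);
 *
 * The 4 MB file range is mapped onto per-object ObjectExtents with
 * Striper::file_to_extents() and read back via a single scatter/gather
 * objecter request; 'onfinish' fires once all pieces have arrived.
 */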
#include <mutex>
#include "include/types.h"
#include "common/ceph_time.h"
#include "osd/OSDMap.h"
#include "Objecter.h"
#include "Striper.h"
class Context;
class Messenger;
class OSDMap;
class Finisher;
/**** Filer interface ***/
class Filer {
CephContext *cct;
Objecter *objecter;
Finisher *finisher;
// probes
struct Probe {
std::mutex lock;
typedef std::lock_guard<std::mutex> lock_guard;
typedef std::unique_lock<std::mutex> unique_lock;
inodeno_t ino;
file_layout_t layout;
snapid_t snapid;
uint64_t *psize;
ceph::real_time *pmtime;
utime_t *pumtime;
int flags;
bool fwd;
Context *onfinish;
std::vector<ObjectExtent> probing;
uint64_t probing_off, probing_len;
std::map<object_t, uint64_t> known_size;
ceph::real_time max_mtime;
std::set<object_t> ops;
int err;
bool found_size;
Probe(inodeno_t i, const file_layout_t &l, snapid_t sn,
uint64_t f, uint64_t *e, ceph::real_time *m, int fl, bool fw,
Context *c) :
ino(i), layout(l), snapid(sn),
psize(e), pmtime(m), pumtime(nullptr), flags(fl), fwd(fw), onfinish(c),
probing_off(f), probing_len(0),
err(0), found_size(false) {}
Probe(inodeno_t i, const file_layout_t &l, snapid_t sn,
uint64_t f, uint64_t *e, utime_t *m, int fl, bool fw,
Context *c) :
ino(i), layout(l), snapid(sn),
psize(e), pmtime(nullptr), pumtime(m), flags(fl), fwd(fw),
onfinish(c), probing_off(f), probing_len(0),
err(0), found_size(false) {}
};
class C_Probe;
void _probe(Probe *p, Probe::unique_lock& pl);
bool _probed(Probe *p, const object_t& oid, uint64_t size,
ceph::real_time mtime, Probe::unique_lock& pl);
public:
Filer(const Filer& other);
const Filer operator=(const Filer& other);
Filer(Objecter *o, Finisher *f) : cct(o->cct), objecter(o), finisher(f) {}
~Filer() {}
bool is_active() {
return objecter->is_active(); // || (oc && oc->is_active());
}
/*** async file interface. scatter/gather as needed. ***/
void read(inodeno_t ino,
const file_layout_t *layout,
snapid_t snap,
uint64_t offset,
uint64_t len,
ceph::buffer::list *bl, // ptr to data
int flags,
Context *onfinish,
int op_flags = 0) {
ceph_assert(snap); // (until there is a non-NOSNAP write)
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents);
objecter->sg_read(extents, snap, bl, flags, onfinish, op_flags);
}
void read_trunc(inodeno_t ino,
const file_layout_t *layout,
snapid_t snap,
uint64_t offset,
uint64_t len,
ceph::buffer::list *bl, // ptr to data
int flags,
uint64_t truncate_size,
__u32 truncate_seq,
Context *onfinish,
int op_flags = 0) {
ceph_assert(snap); // (until there is a non-NOSNAP write)
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, truncate_size,
extents);
objecter->sg_read_trunc(extents, snap, bl, flags,
truncate_size, truncate_seq, onfinish, op_flags);
}
void write(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::buffer::list& bl,
ceph::real_time mtime,
int flags,
Context *oncommit,
int op_flags = 0) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents);
objecter->sg_write(extents, snapc, bl, mtime, flags, oncommit, op_flags);
}
void write_trunc(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::buffer::list& bl,
ceph::real_time mtime,
int flags,
uint64_t truncate_size,
__u32 truncate_seq,
Context *oncommit,
int op_flags = 0) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, truncate_size,
extents);
objecter->sg_write_trunc(extents, snapc, bl, mtime, flags,
truncate_size, truncate_seq, oncommit, op_flags);
}
void truncate(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
__u32 truncate_seq,
ceph::real_time mtime,
int flags,
Context *oncommit);
void _do_truncate_range(struct TruncRange *pr, int fin);
void zero(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::real_time mtime,
int flags,
bool keep_first,
Context *oncommit) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents);
if (extents.size() == 1) {
if (extents[0].offset == 0 && extents[0].length == layout->object_size
&& (!keep_first || extents[0].objectno != 0))
objecter->remove(extents[0].oid, extents[0].oloc,
snapc, mtime, flags, oncommit);
else
objecter->zero(extents[0].oid, extents[0].oloc, extents[0].offset,
extents[0].length, snapc, mtime, flags, oncommit);
} else {
C_GatherBuilder gcom(cct, oncommit);
for (auto p = extents.begin(); p != extents.end(); ++p) {
if (p->offset == 0 && p->length == layout->object_size &&
(!keep_first || p->objectno != 0))
objecter->remove(p->oid, p->oloc,
snapc, mtime, flags,
oncommit ? gcom.new_sub():0);
else
objecter->zero(p->oid, p->oloc, p->offset, p->length,
snapc, mtime, flags,
oncommit ? gcom.new_sub():0);
}
gcom.activate();
}
}
void zero(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::real_time mtime,
int flags,
Context *oncommit) {
zero(ino, layout,
snapc, offset,
len, mtime,
flags, false,
oncommit);
}
// purge range of ino.### objects
int purge_range(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t first_obj, uint64_t num_obj,
ceph::real_time mtime,
int flags, Context *oncommit);
void _do_purge_range(struct PurgeRange *pr, int fin, int err);
/*
* probe
* specify direction,
* and whether we stop when we find data, or hole.
*/
int probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end,
ceph::real_time *mtime,
bool fwd,
int flags,
Context *onfinish);
int probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end,
bool fwd,
int flags,
Context *onfinish) {
return probe(ino, layout, snapid, start_from, end,
(ceph::real_time* )0, fwd, flags, onfinish);
}
int probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end,
utime_t *mtime,
bool fwd,
int flags,
Context *onfinish);
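  /*
   * Illustrative example ('filer', 'ino', 'layout' and 'onfinish' are
   * placeholders): probe forward from offset 0 to find the logical end of
   * the file, i.e. its size:
   *
   *   uint64_t size = 0;
   *   filer.probe(ino, &layout, CEPH_NOSNAP, 0, &size, true, 0, onfinish);
   *
   * 'size' is filled in and 'onfinish' is called once the end of the data
   * has been located.
   */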
private:
int probe_impl(Probe* probe, const file_layout_t *layout,
uint64_t start_from, uint64_t *end);
};
#endif // !CEPH_FILER_H
| 7,905 | 25.265781 | 77 | h |
null | ceph-main/src/osdc/Journaler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Journaler
*
* This class stripes a serial log over objects on the store. Four
* logical pointers:
*
* write_pos - where we're writing new entries
 * unused_field - where we're reading old entries
* expire_pos - what is deemed "old" by user
* trimmed_pos - where we're expiring old items
*
* trimmed_pos <= expire_pos <= unused_field <= write_pos.
*
* Often, unused_field <= write_pos (as with MDS log). During
* recovery, write_pos is undefined until the end of the log is
* discovered.
*
* A "head" struct at the beginning of the log is used to store
* metadata at regular intervals. The basic invariants include:
*
* head.unused_field <= unused_field -- the head may "lag", since
* it's updated lazily.
* head.write_pos <= write_pos
* head.expire_pos <= expire_pos
* head.trimmed_pos <= trimmed_pos
*
* More significantly,
*
* head.expire_pos >= trimmed_pos -- this ensures we can find the
* "beginning" of the log as last
* recorded, before it is trimmed.
* trimming will block until a
* sufficiently current expire_pos
* is committed.
*
* To recover log state, we simply start at the last write_pos in the
* head, and probe the object sequence sizes until we read the end.
*
* Head struct is stored in the first object. Actual journal starts
* after layout.period() bytes.
*
*/
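/*
 * Worked example with hypothetical offsets (for illustration only):
 *
 *   trimmed_pos = 4M <= expire_pos = 8M <= unused_field = 12M <= write_pos = 20M
 *
 * Everything below 4M has already been removed from RADOS, [4M,8M) is
 * expired data still awaiting trimming, [8M,20M) is live journal, and new
 * entries are appended at 20M.  The head object may record slightly older
 * values for each pointer, since it is only rewritten periodically.
 */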
#ifndef CEPH_JOURNALER_H
#define CEPH_JOURNALER_H
#include <list>
#include <map>
#include "Objecter.h"
#include "Filer.h"
#include "common/Timer.h"
#include "common/Throttle.h"
#include "include/common_fwd.h"
class Context;
class Finisher;
class C_OnFinisher;
typedef __u8 stream_format_t;
// Legacy envelope is leading uint32_t size
enum StreamFormat {
JOURNAL_FORMAT_LEGACY = 0,
JOURNAL_FORMAT_RESILIENT = 1,
// Insert new formats here, before COUNT
JOURNAL_FORMAT_COUNT
};
// Highest journal format version that we support
#define JOURNAL_FORMAT_MAX (JOURNAL_FORMAT_COUNT - 1)
// Legacy envelope is leading uint32_t size
#define JOURNAL_ENVELOPE_LEGACY (sizeof(uint32_t))
// Resilient envelope is leading uint64_t sentinel, uint32_t size,
// trailing uint64_t start_ptr
#define JOURNAL_ENVELOPE_RESILIENT (sizeof(uint32_t) + sizeof(uint64_t) + \
sizeof(uint64_t))
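// Illustrative sizes, assuming 4-byte uint32_t and 8-byte uint64_t:
// legacy envelope = 4 bytes (length only);
// resilient envelope = 8 + 4 + 8 = 20 bytes (sentinel + length + start_ptr)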
/**
* Represents a collection of entries serialized in a byte stream.
*
* Each entry consists of:
* - a blob (used by the next level up as a serialized LogEvent)
* - a uint64_t (used by the next level up as a pointer to the start
* of the entry in the collection bytestream)
*/
class JournalStream
{
stream_format_t format;
public:
JournalStream(stream_format_t format_) : format(format_) {}
void set_format(stream_format_t format_) {format = format_;}
bool readable(bufferlist &bl, uint64_t *need) const;
size_t read(bufferlist &from, bufferlist *to, uint64_t *start_ptr);
size_t write(bufferlist &entry, bufferlist *to, uint64_t const &start_ptr);
size_t get_envelope_size() const {
if (format >= JOURNAL_FORMAT_RESILIENT) {
return JOURNAL_ENVELOPE_RESILIENT;
} else {
return JOURNAL_ENVELOPE_LEGACY;
}
}
// A magic number for the start of journal entries, so that we can
// identify them in damaged journals.
static const uint64_t sentinel = 0x3141592653589793;
};
class Journaler {
public:
// this goes at the head of the log "file".
class Header {
public:
uint64_t trimmed_pos;
uint64_t expire_pos;
uint64_t unused_field;
uint64_t write_pos;
std::string magic;
file_layout_t layout; //< The mapping from byte stream offsets
// to RADOS objects
stream_format_t stream_format; //< The encoding of LogEvents
// within the journal byte stream
Header(const char *m="") :
trimmed_pos(0), expire_pos(0), unused_field(0), write_pos(0), magic(m),
stream_format(-1) {
}
void encode(bufferlist &bl) const {
ENCODE_START(2, 2, bl);
encode(magic, bl);
encode(trimmed_pos, bl);
encode(expire_pos, bl);
encode(unused_field, bl);
encode(write_pos, bl);
encode(layout, bl, 0); // encode in legacy format
encode(stream_format, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &bl) {
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(magic, bl);
decode(trimmed_pos, bl);
decode(expire_pos, bl);
decode(unused_field, bl);
decode(write_pos, bl);
decode(layout, bl);
if (struct_v > 1) {
decode(stream_format, bl);
} else {
stream_format = JOURNAL_FORMAT_LEGACY;
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const {
f->open_object_section("journal_header");
{
f->dump_string("magic", magic);
f->dump_unsigned("write_pos", write_pos);
f->dump_unsigned("expire_pos", expire_pos);
f->dump_unsigned("trimmed_pos", trimmed_pos);
f->dump_unsigned("stream_format", stream_format);
f->dump_object("layout", layout);
}
f->close_section(); // journal_header
}
static void generate_test_instances(std::list<Header*> &ls)
{
ls.push_back(new Header());
ls.push_back(new Header());
ls.back()->trimmed_pos = 1;
ls.back()->expire_pos = 2;
ls.back()->unused_field = 3;
ls.back()->write_pos = 4;
ls.back()->magic = "magique";
ls.push_back(new Header());
ls.back()->stream_format = JOURNAL_FORMAT_RESILIENT;
}
};
WRITE_CLASS_ENCODER(Header)
uint32_t get_stream_format() const {
return stream_format;
}
Header last_committed;
private:
// me
CephContext *cct;
std::mutex lock;
const std::string name;
typedef std::lock_guard<std::mutex> lock_guard;
typedef std::unique_lock<std::mutex> unique_lock;
Finisher *finisher;
Header last_written;
inodeno_t ino;
int64_t pg_pool;
bool readonly;
file_layout_t layout;
uint32_t stream_format;
JournalStream journal_stream;
const char *magic;
Objecter *objecter;
Filer filer;
PerfCounters *logger;
int logger_key_lat;
class C_DelayFlush;
C_DelayFlush *delay_flush_event;
/*
* Do a flush as a result of a C_DelayFlush context.
*/
void _do_delayed_flush()
{
ceph_assert(delay_flush_event != NULL);
lock_guard l(lock);
delay_flush_event = NULL;
_do_flush();
}
// my state
static const int STATE_UNDEF = 0;
static const int STATE_READHEAD = 1;
static const int STATE_PROBING = 2;
static const int STATE_ACTIVE = 3;
static const int STATE_REREADHEAD = 4;
static const int STATE_REPROBING = 5;
static const int STATE_STOPPING = 6;
int state;
int error;
void _write_head(Context *oncommit=NULL);
void _wait_for_flush(Context *onsafe);
void _trim();
// header
ceph::real_time last_wrote_head;
void _finish_write_head(int r, Header &wrote, C_OnFinisher *oncommit);
class C_WriteHead;
friend class C_WriteHead;
void _reread_head(Context *onfinish);
void _set_layout(file_layout_t const *l);
std::list<Context*> waitfor_recover;
void _read_head(Context *on_finish, bufferlist *bl);
void _finish_read_head(int r, bufferlist& bl);
void _finish_reread_head(int r, bufferlist& bl, Context *finish);
void _probe(Context *finish, uint64_t *end);
void _finish_probe_end(int r, uint64_t end);
void _reprobe(C_OnFinisher *onfinish);
void _finish_reprobe(int r, uint64_t end, C_OnFinisher *onfinish);
void _finish_reread_head_and_probe(int r, C_OnFinisher *onfinish);
class C_ReadHead;
friend class C_ReadHead;
class C_ProbeEnd;
friend class C_ProbeEnd;
class C_RereadHead;
friend class C_RereadHead;
class C_ReProbe;
friend class C_ReProbe;
class C_RereadHeadProbe;
friend class C_RereadHeadProbe;
// writer
uint64_t prezeroing_pos;
uint64_t prezero_pos; ///< we zero journal space ahead of write_pos to
// avoid problems with tail probing
uint64_t write_pos; ///< logical write position, where next entry
// will go
uint64_t flush_pos; ///< where we will flush. if
/// write_pos>flush_pos, we're buffering writes.
uint64_t safe_pos; ///< what has been committed safely to disk.
uint64_t next_safe_pos; /// start position of the first entry that isn't
/// being fully flushed. If we don't flush any
// partial entry, it's equal to flush_pos.
bufferlist write_buf; ///< write buffer. flush_pos +
/// write_buf.length() == write_pos.
// protect write_buf from bufferlist _len overflow
Throttle write_buf_throttle;
uint64_t waiting_for_zero_pos;
interval_set<uint64_t> pending_zero; // non-contig bits we've zeroed
std::list<Context*> waitfor_prezero;
std::map<uint64_t, uint64_t> pending_safe; // flush_pos -> safe_pos
// when safe through given offset
std::map<uint64_t, std::list<Context*> > waitfor_safe;
void _flush(C_OnFinisher *onsafe);
void _do_flush(unsigned amount=0);
void _finish_flush(int r, uint64_t start, ceph::real_time stamp);
class C_Flush;
friend class C_Flush;
// reader
uint64_t read_pos; // logical read position, where next entry starts.
uint64_t requested_pos; // what we've requested from OSD.
uint64_t received_pos; // what we've received from OSD.
// read buffer. unused_field + read_buf.length() == prefetch_pos.
bufferlist read_buf;
std::map<uint64_t,bufferlist> prefetch_buf;
uint64_t fetch_len; // how much to read at a time
uint64_t temp_fetch_len;
// for wait_for_readable()
C_OnFinisher *on_readable;
C_OnFinisher *on_write_error;
bool called_write_error;
// read completion callback
void _finish_read(int r, uint64_t offset, uint64_t length, bufferlist &bl);
void _finish_retry_read(int r);
void _assimilate_prefetch();
void _issue_read(uint64_t len); // read some more
void _prefetch(); // maybe read ahead
class C_Read;
friend class C_Read;
class C_RetryRead;
friend class C_RetryRead;
// trimmer
uint64_t expire_pos; // what we're allowed to trim to
uint64_t trimming_pos; // what we've requested to trim through
uint64_t trimmed_pos; // what has been trimmed
bool readable;
void _finish_trim(int r, uint64_t to);
class C_Trim;
friend class C_Trim;
void _issue_prezero();
void _finish_prezero(int r, uint64_t from, uint64_t len);
friend struct C_Journaler_Prezero;
// only init_headers when following or first reading off-disk
void init_headers(Header& h) {
ceph_assert(readonly ||
state == STATE_READHEAD ||
state == STATE_REREADHEAD);
last_written = last_committed = h;
}
/**
* handle a write error
*
* called when we get an objecter error on a write.
*
* @param r error code
*/
void handle_write_error(int r);
bool _have_next_entry();
void _finish_erase(int data_result, C_OnFinisher *completion);
class C_EraseFinish;
friend class C_EraseFinish;
C_OnFinisher *wrap_finisher(Context *c);
uint32_t write_iohint; // the fadvise flags for write op, see
                         // CEPH_OSD_OP_FADVISE_*
public:
Journaler(const std::string &name_, inodeno_t ino_, int64_t pool,
const char *mag, Objecter *obj, PerfCounters *l, int lkey, Finisher *f) :
last_committed(mag),
cct(obj->cct), name(name_), finisher(f), last_written(mag),
ino(ino_), pg_pool(pool), readonly(true),
stream_format(-1), journal_stream(-1),
magic(mag),
objecter(obj), filer(objecter, f), logger(l), logger_key_lat(lkey),
delay_flush_event(0),
state(STATE_UNDEF), error(0),
prezeroing_pos(0), prezero_pos(0), write_pos(0), flush_pos(0),
safe_pos(0), next_safe_pos(0),
write_buf_throttle(cct, "write_buf_throttle", UINT_MAX - (UINT_MAX >> 3)),
waiting_for_zero_pos(0),
read_pos(0), requested_pos(0), received_pos(0),
fetch_len(0), temp_fetch_len(0),
on_readable(0), on_write_error(NULL), called_write_error(false),
expire_pos(0), trimming_pos(0), trimmed_pos(0), readable(false),
write_iohint(0)
{
}
/* reset
*
* NOTE: we assume the caller knows/has ensured that any objects in
* our sequence do not exist.. e.g. after a MKFS. this is _not_ an
* "erase" method.
*/
void reset() {
lock_guard l(lock);
ceph_assert(state == STATE_ACTIVE);
readonly = true;
delay_flush_event = NULL;
state = STATE_UNDEF;
error = 0;
prezeroing_pos = 0;
prezero_pos = 0;
write_pos = 0;
flush_pos = 0;
safe_pos = 0;
next_safe_pos = 0;
read_pos = 0;
requested_pos = 0;
received_pos = 0;
fetch_len = 0;
ceph_assert(!on_readable);
expire_pos = 0;
trimming_pos = 0;
trimmed_pos = 0;
waiting_for_zero_pos = 0;
}
// Asynchronous operations
// =======================
void erase(Context *completion);
void create(file_layout_t *layout, stream_format_t const sf);
void recover(Context *onfinish);
void reread_head(Context *onfinish);
void reread_head_and_probe(Context *onfinish);
void write_head(Context *onsave=0);
void wait_for_flush(Context *onsafe = 0);
void flush(Context *onsafe = 0);
void wait_for_readable(Context *onfinish);
void _wait_for_readable(Context *onfinish);
bool have_waiter() const;
void wait_for_prezero(Context *onfinish);
// Synchronous setters
// ===================
void set_layout(file_layout_t const *l);
void set_readonly();
void set_writeable();
void set_write_pos(uint64_t p) {
lock_guard l(lock);
prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = p;
}
void set_read_pos(uint64_t p) {
lock_guard l(lock);
// we can't cope w/ in-progress read right now.
ceph_assert(requested_pos == received_pos);
read_pos = requested_pos = received_pos = p;
read_buf.clear();
}
uint64_t append_entry(bufferlist& bl);
void set_expire_pos(uint64_t ep) {
lock_guard l(lock);
expire_pos = ep;
}
void set_trimmed_pos(uint64_t p) {
lock_guard l(lock);
trimming_pos = trimmed_pos = p;
}
bool _write_head_needed();
bool write_head_needed() {
lock_guard l(lock);
return _write_head_needed();
}
void trim();
void trim_tail() {
lock_guard l(lock);
ceph_assert(!readonly);
_issue_prezero();
}
void set_write_error_handler(Context *c);
void set_write_iohint(uint32_t iohint_flags) {
write_iohint = iohint_flags;
}
/**
* Cause any ongoing waits to error out with -EAGAIN, set error
* to -EAGAIN.
*/
void shutdown();
public:
// Synchronous getters
// ===================
// TODO: need some locks on reads for true safety
uint64_t get_layout_period() const {
return layout.get_period();
}
file_layout_t& get_layout() { return layout; }
bool is_active() { return state == STATE_ACTIVE; }
bool is_stopping() { return state == STATE_STOPPING; }
int get_error() { return error; }
bool is_readonly() { return readonly; }
bool is_readable();
bool _is_readable();
bool try_read_entry(bufferlist& bl);
uint64_t get_write_pos() const { return write_pos; }
uint64_t get_write_safe_pos() const { return safe_pos; }
uint64_t get_read_pos() const { return read_pos; }
uint64_t get_expire_pos() const { return expire_pos; }
uint64_t get_trimmed_pos() const { return trimmed_pos; }
size_t get_journal_envelope_size() const {
return journal_stream.get_envelope_size();
}
void check_isreadable();
};
WRITE_CLASS_ENCODER(Journaler::Header)
#endif
| 16,039 | 28.377289 | 88 | h |
null | ceph-main/src/osdc/ObjectCacher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OBJECTCACHER_H
#define CEPH_OBJECTCACHER_H
#include "include/types.h"
#include "include/lru.h"
#include "include/Context.h"
#include "include/xlist.h"
#include "include/common_fwd.h"
#include "common/Cond.h"
#include "common/Finisher.h"
#include "common/Thread.h"
#include "common/zipkin_trace.h"
#include "Objecter.h"
#include "Striper.h"
class WritebackHandler;
enum {
l_objectcacher_first = 25000,
l_objectcacher_cache_ops_hit, // ops we satisfy completely from cache
l_objectcacher_cache_ops_miss, // ops we don't satisfy completely from cache
l_objectcacher_cache_bytes_hit, // bytes read directly from cache
l_objectcacher_cache_bytes_miss, // bytes we couldn't read directly
// from cache
l_objectcacher_data_read, // total bytes read out
l_objectcacher_data_written, // bytes written to cache
l_objectcacher_data_flushed, // bytes flushed to WritebackHandler
l_objectcacher_overwritten_in_flush, // bytes overwritten while
// flushing is in progress
l_objectcacher_write_ops_blocked, // total write ops we delayed due
// to dirty limits
l_objectcacher_write_bytes_blocked, // total number of write bytes
// we delayed due to dirty
// limits
l_objectcacher_write_time_blocked, // total time in seconds spent
// blocking a write due to dirty
// limits
l_objectcacher_last,
};
class ObjectCacher {
PerfCounters *perfcounter;
public:
CephContext *cct;
class Object;
struct ObjectSet;
class C_ReadFinish;
typedef void (*flush_set_callback_t) (void *p, ObjectSet *oset);
// read scatter/gather
struct OSDRead {
std::vector<ObjectExtent> extents;
snapid_t snap;
ceph::buffer::list *bl;
int fadvise_flags;
OSDRead(snapid_t s, ceph::buffer::list *b, int f)
: snap(s), bl(b), fadvise_flags(f) {}
};
OSDRead *prepare_read(snapid_t snap, ceph::buffer::list *b, int f) const {
return new OSDRead(snap, b, f);
}
// write scatter/gather
struct OSDWrite {
std::vector<ObjectExtent> extents;
SnapContext snapc;
ceph::buffer::list bl;
ceph::real_time mtime;
int fadvise_flags;
ceph_tid_t journal_tid;
OSDWrite(const SnapContext& sc, const ceph::buffer::list& b, ceph::real_time mt,
int f, ceph_tid_t _journal_tid)
: snapc(sc), bl(b), mtime(mt), fadvise_flags(f),
journal_tid(_journal_tid) {}
};
OSDWrite *prepare_write(const SnapContext& sc,
const ceph::buffer::list &b,
ceph::real_time mt,
int f,
ceph_tid_t journal_tid) const {
return new OSDWrite(sc, b, mt, f, journal_tid);
}
// ******* BufferHead *********
class BufferHead : public LRUObject {
public:
// states
static const int STATE_MISSING = 0;
static const int STATE_CLEAN = 1;
static const int STATE_ZERO = 2; // NOTE: these are *clean* zeros
static const int STATE_DIRTY = 3;
static const int STATE_RX = 4;
static const int STATE_TX = 5;
static const int STATE_ERROR = 6; // a read error occurred
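    // Typical transitions (illustrative, not exhaustive):
    // read miss:  MISSING -> RX -> CLEAN (or ERROR if the read fails)
    // write-back: DIRTY -> TX -> CLEAN once the OSD write commits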
private:
// my fields
int state;
int ref;
struct {
loff_t start, length; // bh extent in object
} ex;
    bool dontneed; // indicates the bh is not needed by anyone
    bool nocache; // indicates the bh is not needed by this caller
public:
Object *ob;
ceph::buffer::list bl;
ceph_tid_t last_write_tid; // version of bh (if non-zero)
ceph_tid_t last_read_tid; // tid of last read op (if any)
ceph::real_time last_write;
SnapContext snapc;
ceph_tid_t journal_tid;
int error; // holds return value for failed reads
std::map<loff_t, std::list<Context*> > waitfor_read;
// cons
explicit BufferHead(Object *o) :
state(STATE_MISSING),
ref(0),
dontneed(false),
nocache(false),
ob(o),
last_write_tid(0),
last_read_tid(0),
journal_tid(0),
error(0) {
ex.start = ex.length = 0;
}
// extent
loff_t start() const { return ex.start; }
void set_start(loff_t s) { ex.start = s; }
loff_t length() const { return ex.length; }
void set_length(loff_t l) { ex.length = l; }
loff_t end() const { return ex.start + ex.length; }
loff_t last() const { return end() - 1; }
// states
void set_state(int s) {
if (s == STATE_RX || s == STATE_TX) get();
if (state == STATE_RX || state == STATE_TX) put();
state = s;
}
int get_state() const { return state; }
inline int get_error() const {
return error;
}
inline void set_error(int _error) {
error = _error;
}
inline ceph_tid_t get_journal_tid() const {
return journal_tid;
}
inline void set_journal_tid(ceph_tid_t _journal_tid) {
journal_tid = _journal_tid;
}
bool is_missing() const { return state == STATE_MISSING; }
bool is_dirty() const { return state == STATE_DIRTY; }
bool is_clean() const { return state == STATE_CLEAN; }
bool is_zero() const { return state == STATE_ZERO; }
bool is_tx() const { return state == STATE_TX; }
bool is_rx() const { return state == STATE_RX; }
bool is_error() const { return state == STATE_ERROR; }
// reference counting
int get() {
ceph_assert(ref >= 0);
if (ref == 0) lru_pin();
return ++ref;
}
int put() {
ceph_assert(ref > 0);
if (ref == 1) lru_unpin();
--ref;
return ref;
}
void set_dontneed(bool v) {
dontneed = v;
}
bool get_dontneed() const {
return dontneed;
}
void set_nocache(bool v) {
nocache = v;
}
bool get_nocache() const {
return nocache;
}
inline bool can_merge_journal(BufferHead *bh) const {
return (get_journal_tid() == bh->get_journal_tid());
}
struct ptr_lt {
bool operator()(const BufferHead* l, const BufferHead* r) const {
const Object *lob = l->ob;
const Object *rob = r->ob;
const ObjectSet *loset = lob->oset;
const ObjectSet *roset = rob->oset;
if (loset != roset)
return loset < roset;
if (lob != rob)
return lob < rob;
if (l->start() != r->start())
return l->start() < r->start();
return l < r;
}
};
};
// ******* Object *********
class Object : public LRUObject {
private:
// ObjectCacher::Object fields
int ref;
ObjectCacher *oc;
sobject_t oid;
friend struct ObjectSet;
public:
uint64_t object_no;
ObjectSet *oset;
xlist<Object*>::item set_item;
object_locator_t oloc;
uint64_t truncate_size, truncate_seq;
bool complete;
bool exists;
std::map<loff_t, BufferHead*> data;
ceph_tid_t last_write_tid; // version of bh (if non-zero)
ceph_tid_t last_commit_tid; // last update committed.
int dirty_or_tx;
std::map< ceph_tid_t, std::list<Context*> > waitfor_commit;
xlist<C_ReadFinish*> reads;
Object(const Object&) = delete;
Object& operator=(const Object&) = delete;
Object(ObjectCacher *_oc, sobject_t o, uint64_t ono, ObjectSet *os,
object_locator_t& l, uint64_t ts, uint64_t tq) :
ref(0),
oc(_oc),
oid(o), object_no(ono), oset(os), set_item(this), oloc(l),
truncate_size(ts), truncate_seq(tq),
complete(false), exists(true),
last_write_tid(0), last_commit_tid(0),
dirty_or_tx(0) {
// add to set
os->objects.push_back(&set_item);
}
~Object() {
reads.clear();
ceph_assert(ref == 0);
ceph_assert(data.empty());
ceph_assert(dirty_or_tx == 0);
set_item.remove_myself();
}
sobject_t get_soid() const { return oid; }
object_t get_oid() { return oid.oid; }
snapid_t get_snap() { return oid.snap; }
ObjectSet *get_object_set() const { return oset; }
std::string get_namespace() { return oloc.nspace; }
uint64_t get_object_number() const { return object_no; }
const object_locator_t& get_oloc() const { return oloc; }
void set_object_locator(object_locator_t& l) { oloc = l; }
bool can_close() const {
if (lru_is_expireable()) {
ceph_assert(data.empty());
ceph_assert(waitfor_commit.empty());
return true;
}
return false;
}
/**
* Check buffers and waiters for consistency
* - no overlapping buffers
* - index in map matches BH
* - waiters fall within BH
*/
void audit_buffers();
/**
* find first buffer that includes or follows an offset
*
* @param offset object byte offset
* @return iterator pointing to buffer, or data.end()
*/
std::map<loff_t,BufferHead*>::const_iterator data_lower_bound(loff_t offset) const {
auto p = data.lower_bound(offset);
if (p != data.begin() &&
(p == data.end() || p->first > offset)) {
--p; // might overlap!
if (p->first + p->second->length() <= offset)
++p; // doesn't overlap.
}
return p;
}
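    // Example (illustrative): with buffers starting at 0 (length 4096) and
    // 8192 (length 4096), data_lower_bound(2048) returns the buffer at 0
    // (it overlaps the offset), while data_lower_bound(5000) returns the
    // buffer at 8192.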
// bh
// add to my map
void add_bh(BufferHead *bh) {
if (data.empty())
get();
ceph_assert(data.count(bh->start()) == 0);
data[bh->start()] = bh;
}
void remove_bh(BufferHead *bh) {
ceph_assert(data.count(bh->start()));
data.erase(bh->start());
if (data.empty())
put();
}
bool is_empty() const { return data.empty(); }
// mid-level
BufferHead *split(BufferHead *bh, loff_t off);
void merge_left(BufferHead *left, BufferHead *right);
bool can_merge_bh(BufferHead *left, BufferHead *right);
void try_merge_bh(BufferHead *bh);
void maybe_rebuild_buffer(BufferHead *bh);
bool is_cached(loff_t off, loff_t len) const;
bool include_all_cached_data(loff_t off, loff_t len);
int map_read(ObjectExtent &ex,
std::map<loff_t, BufferHead*>& hits,
std::map<loff_t, BufferHead*>& missing,
std::map<loff_t, BufferHead*>& rx,
std::map<loff_t, BufferHead*>& errors);
BufferHead *map_write(ObjectExtent &ex, ceph_tid_t tid);
void replace_journal_tid(BufferHead *bh, ceph_tid_t tid);
void truncate(loff_t s);
void discard(loff_t off, loff_t len, C_GatherBuilder* commit_gather);
// reference counting
int get() {
ceph_assert(ref >= 0);
if (ref == 0) lru_pin();
return ++ref;
}
int put() {
ceph_assert(ref > 0);
if (ref == 1) lru_unpin();
--ref;
return ref;
}
};
struct ObjectSet {
void *parent;
inodeno_t ino;
uint64_t truncate_seq, truncate_size;
int64_t poolid;
xlist<Object*> objects;
int dirty_or_tx;
bool return_enoent;
ObjectSet(void *p, int64_t _poolid, inodeno_t i)
: parent(p), ino(i), truncate_seq(0),
truncate_size(0), poolid(_poolid), dirty_or_tx(0),
return_enoent(false) {}
};
// ******* ObjectCacher *********
// ObjectCacher fields
private:
WritebackHandler& writeback_handler;
bool scattered_write;
std::string name;
ceph::mutex& lock;
uint64_t max_dirty, target_dirty, max_size, max_objects;
ceph::timespan max_dirty_age;
bool block_writes_upfront;
ZTracer::Endpoint trace_endpoint;
flush_set_callback_t flush_set_callback;
void *flush_set_callback_arg;
// indexed by pool_id
std::vector<ceph::unordered_map<sobject_t, Object*> > objects;
std::list<Context*> waitfor_read;
ceph_tid_t last_read_tid;
std::set<BufferHead*, BufferHead::ptr_lt> dirty_or_tx_bh;
LRU bh_lru_dirty, bh_lru_rest;
LRU ob_lru;
ceph::condition_variable flusher_cond;
bool flusher_stop;
void flusher_entry();
class FlusherThread : public Thread {
ObjectCacher *oc;
public:
explicit FlusherThread(ObjectCacher *o) : oc(o) {}
void *entry() override {
oc->flusher_entry();
return 0;
}
} flusher_thread;
Finisher finisher;
// objects
Object *get_object_maybe(sobject_t oid, object_locator_t &l) {
// have it?
if (((uint32_t)l.pool < objects.size()) &&
(objects[l.pool].count(oid)))
return objects[l.pool][oid];
return NULL;
}
Object *get_object(sobject_t oid, uint64_t object_no, ObjectSet *oset,
object_locator_t &l, uint64_t truncate_size,
uint64_t truncate_seq);
void close_object(Object *ob);
// bh stats
ceph::condition_variable stat_cond;
loff_t stat_clean;
loff_t stat_zero;
loff_t stat_dirty;
loff_t stat_rx;
loff_t stat_tx;
loff_t stat_missing;
loff_t stat_error;
loff_t stat_dirty_waiting; // bytes that writers are waiting on to write
size_t stat_nr_dirty_waiters;
void verify_stats() const;
void bh_stat_add(BufferHead *bh);
void bh_stat_sub(BufferHead *bh);
loff_t get_stat_tx() const { return stat_tx; }
loff_t get_stat_rx() const { return stat_rx; }
loff_t get_stat_dirty() const { return stat_dirty; }
loff_t get_stat_clean() const { return stat_clean; }
loff_t get_stat_zero() const { return stat_zero; }
loff_t get_stat_dirty_waiting() const { return stat_dirty_waiting; }
size_t get_stat_nr_dirty_waiters() const { return stat_nr_dirty_waiters; }
void touch_bh(BufferHead *bh) {
if (bh->is_dirty())
bh_lru_dirty.lru_touch(bh);
else
bh_lru_rest.lru_touch(bh);
bh->set_dontneed(false);
bh->set_nocache(false);
touch_ob(bh->ob);
}
void touch_ob(Object *ob) {
ob_lru.lru_touch(ob);
}
void bottouch_ob(Object *ob) {
ob_lru.lru_bottouch(ob);
}
// bh states
void bh_set_state(BufferHead *bh, int s);
void copy_bh_state(BufferHead *bh1, BufferHead *bh2) {
bh_set_state(bh2, bh1->get_state());
}
void mark_missing(BufferHead *bh) {
bh_set_state(bh,BufferHead::STATE_MISSING);
}
void mark_clean(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_CLEAN);
}
void mark_zero(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_ZERO);
}
void mark_rx(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_RX);
}
void mark_tx(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_TX); }
void mark_error(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_ERROR);
}
void mark_dirty(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_DIRTY);
bh_lru_dirty.lru_touch(bh);
//bh->set_dirty_stamp(ceph_clock_now());
}
void bh_add(Object *ob, BufferHead *bh);
void bh_remove(Object *ob, BufferHead *bh);
// io
void bh_read(BufferHead *bh, int op_flags,
const ZTracer::Trace &parent_trace);
void bh_write(BufferHead *bh, const ZTracer::Trace &parent_trace);
void bh_write_scattered(std::list<BufferHead*>& blist);
void bh_write_adjacencies(BufferHead *bh, ceph::real_time cutoff,
int64_t *amount, int *max_count);
void trim();
void flush(ZTracer::Trace *trace, loff_t amount=0);
/**
* flush a range of buffers
*
* Flush any buffers that intersect the specified extent. If len==0,
* flush *all* buffers for the object.
*
* @param o object
* @param off start offset
* @param len extent length, or 0 for entire object
* @return true if object was already clean/flushed.
*/
bool flush(Object *o, loff_t off, loff_t len,
ZTracer::Trace *trace);
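  // e.g. (illustrative) flush(ob, 0, 0, &trace) writes back every dirty
  // buffer of 'ob', while a non-zero len only touches buffers intersecting
  // [off, off+len).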
loff_t release(Object *o);
void purge(Object *o);
int64_t reads_outstanding;
ceph::condition_variable read_cond;
int _readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
bool external_call, ZTracer::Trace *trace);
void retry_waiting_reads();
public:
void bh_read_finish(int64_t poolid, sobject_t oid, ceph_tid_t tid,
loff_t offset, uint64_t length,
ceph::buffer::list &bl, int r,
bool trust_enoent);
void bh_write_commit(int64_t poolid, sobject_t oid,
std::vector<std::pair<loff_t, uint64_t> >& ranges,
ceph_tid_t t, int r);
class C_WriteCommit;
class C_WaitForWrite;
void perf_start();
void perf_stop();
ObjectCacher(CephContext *cct_, std::string name, WritebackHandler& wb, ceph::mutex& l,
flush_set_callback_t flush_callback,
void *flush_callback_arg,
uint64_t max_bytes, uint64_t max_objects,
uint64_t max_dirty, uint64_t target_dirty, double max_age,
bool block_writes_upfront);
~ObjectCacher();
void start() {
flusher_thread.create("flusher");
}
void stop() {
ceph_assert(flusher_thread.is_started());
lock.lock(); // hmm.. watch out for deadlock!
flusher_stop = true;
flusher_cond.notify_all();
lock.unlock();
flusher_thread.join();
}
class C_RetryRead;
// non-blocking. async.
/**
* @note total read size must be <= INT_MAX, since
* the return value is total bytes read
*/
int readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
ZTracer::Trace *parent_trace = nullptr);
int writex(OSDWrite *wr, ObjectSet *oset, Context *onfreespace,
ZTracer::Trace *parent_trace = nullptr);
bool is_cached(ObjectSet *oset, std::vector<ObjectExtent>& extents,
snapid_t snapid);
private:
// write blocking
int _wait_for_write(OSDWrite *wr, uint64_t len, ObjectSet *oset,
ZTracer::Trace *trace, Context *onfreespace);
void _maybe_wait_for_writeback(uint64_t len, ZTracer::Trace *trace);
bool _flush_set_finish(C_GatherBuilder *gather, Context *onfinish);
void _discard(ObjectSet *oset, const std::vector<ObjectExtent>& exls,
C_GatherBuilder* gather);
void _discard_finish(ObjectSet *oset, bool was_dirty, Context* on_finish);
public:
bool set_is_empty(ObjectSet *oset);
bool set_is_cached(ObjectSet *oset);
bool set_is_dirty_or_committing(ObjectSet *oset);
bool flush_set(ObjectSet *oset, Context *onfinish=0);
bool flush_set(ObjectSet *oset, std::vector<ObjectExtent>& ex,
ZTracer::Trace *trace, Context *onfinish = 0);
bool flush_all(Context *onfinish = 0);
void purge_set(ObjectSet *oset);
// returns # of bytes not released (ie non-clean)
loff_t release_set(ObjectSet *oset);
uint64_t release_all();
void discard_set(ObjectSet *oset, const std::vector<ObjectExtent>& ex);
void discard_writeback(ObjectSet *oset, const std::vector<ObjectExtent>& ex,
Context* on_finish);
/**
* Retry any in-flight reads that get -ENOENT instead of marking
* them zero, and get rid of any cached -ENOENTs.
* After this is called and the cache's lock is unlocked,
* any new requests will treat -ENOENT normally.
*/
void clear_nonexistence(ObjectSet *oset);
// cache sizes
void set_max_dirty(uint64_t v) {
max_dirty = v;
}
void set_target_dirty(int64_t v) {
target_dirty = v;
}
void set_max_size(int64_t v) {
max_size = v;
}
void set_max_dirty_age(double a) {
max_dirty_age = ceph::make_timespan(a);
}
void set_max_objects(int64_t v) {
max_objects = v;
}
// file functions
/*** async+caching (non-blocking) file interface ***/
int file_is_cached(ObjectSet *oset, file_layout_t *layout,
snapid_t snapid, loff_t offset, uint64_t len) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, extents);
return is_cached(oset, extents, snapid);
}
int file_read(ObjectSet *oset, file_layout_t *layout, snapid_t snapid,
loff_t offset, uint64_t len, ceph::buffer::list *bl, int flags,
Context *onfinish) {
OSDRead *rd = prepare_read(snapid, bl, flags);
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, rd->extents);
return readx(rd, oset, onfinish);
}
int file_write(ObjectSet *oset, file_layout_t *layout,
const SnapContext& snapc, loff_t offset, uint64_t len,
ceph::buffer::list& bl, ceph::real_time mtime, int flags) {
OSDWrite *wr = prepare_write(snapc, bl, mtime, flags, 0);
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, wr->extents);
return writex(wr, oset, nullptr);
}
bool file_flush(ObjectSet *oset, file_layout_t *layout,
const SnapContext& snapc, loff_t offset, uint64_t len,
Context *onfinish) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, extents);
ZTracer::Trace trace;
return flush_set(oset, extents, &trace, onfinish);
}
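  /*
   * Illustrative sequence built from the calls above ('oc', 'oset', 'layout',
   * 'snapc', 'bl', 'off', 'len' and 'onflush' are placeholders):
   *
   *   oc.file_write(oset, &layout, snapc, off, len, bl,
   *                 ceph::real_clock::now(), 0);
   *   oc.file_flush(oset, &layout, snapc, off, len, onflush);
   *
   * file_write() only dirties the cache; the data reaches the OSDs when the
   * flusher thread runs or an explicit flush is requested.
   */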
};
inline std::ostream& operator<<(std::ostream &out,
const ObjectCacher::BufferHead &bh)
{
out << "bh[ " << &bh << " "
<< bh.start() << "~" << bh.length()
<< " " << bh.ob
<< " (" << bh.bl.length() << ")"
<< " v " << bh.last_write_tid;
if (bh.get_journal_tid() != 0) {
out << " j " << bh.get_journal_tid();
}
if (bh.is_tx()) out << " tx";
if (bh.is_rx()) out << " rx";
if (bh.is_dirty()) out << " dirty";
if (bh.is_clean()) out << " clean";
if (bh.is_zero()) out << " zero";
if (bh.is_missing()) out << " missing";
if (bh.bl.length() > 0) out << " firstbyte=" << (int)bh.bl[0];
if (bh.error) out << " error=" << bh.error;
out << "]";
out << " waiters = {";
for (auto it = bh.waitfor_read.begin(); it != bh.waitfor_read.end(); ++it) {
out << " " << it->first << "->[";
for (auto lit = it->second.begin();
lit != it->second.end(); ++lit) {
out << *lit << ", ";
}
out << "]";
}
out << "}";
return out;
}
inline std::ostream& operator<<(std::ostream &out,
const ObjectCacher::ObjectSet &os)
{
return out << "objectset[" << os.ino
<< " ts " << os.truncate_seq << "/" << os.truncate_size
<< " objects " << os.objects.size()
<< " dirty_or_tx " << os.dirty_or_tx
<< "]";
}
inline std::ostream& operator<<(std::ostream &out,
const ObjectCacher::Object &ob)
{
out << "object["
<< ob.get_soid() << " oset " << ob.oset << std::dec
<< " wr " << ob.last_write_tid << "/" << ob.last_commit_tid;
if (ob.complete)
out << " COMPLETE";
if (!ob.exists)
out << " !EXISTS";
out << "]";
return out;
}
#endif
| 21,970 | 27.095908 | 89 | h |
null | ceph-main/src/osdc/Striper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_STRIPER_H
#define CEPH_STRIPER_H
#include "include/common_fwd.h"
#include "include/types.h"
#include "osd/osd_types.h"
#include "osdc/StriperTypes.h"
//namespace ceph {
class Striper {
public:
static void file_to_extents(
CephContext *cct, const file_layout_t *layout, uint64_t offset,
uint64_t len, uint64_t trunc_size, uint64_t buffer_offset,
striper::LightweightObjectExtents* object_extents);
/*
   * map (ino, layout, offset, len) to a (list of) ObjectExtents (byte
* ranges in objects on (primary) osds)
*/
static void file_to_extents(CephContext *cct, const char *object_format,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
std::map<object_t, std::vector<ObjectExtent> >& extents,
uint64_t buffer_offset=0);
static void file_to_extents(CephContext *cct, const char *object_format,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
std::vector<ObjectExtent>& extents,
uint64_t buffer_offset=0);
static void file_to_extents(CephContext *cct, inodeno_t ino,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
std::vector<ObjectExtent>& extents) {
// generate prefix/format
char buf[32];
snprintf(buf, sizeof(buf), "%llx.%%08llx", (long long unsigned)ino);
file_to_extents(cct, buf, layout, offset, len, trunc_size, extents);
}
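  // For example (illustrative): ino 0x1234 yields the format "1234.%08llx",
  // so the file's backing objects are named "1234.00000000",
  // "1234.00000001", and so on.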
/**
   * reverse map an object extent to file extents
*/
static void extent_to_file(CephContext *cct, file_layout_t *layout,
uint64_t objectno, uint64_t off, uint64_t len,
std::vector<std::pair<uint64_t, uint64_t> >& extents);
static uint64_t object_truncate_size(
CephContext *cct, const file_layout_t *layout,
uint64_t objectno, uint64_t trunc_size);
static uint64_t get_num_objects(const file_layout_t& layout,
uint64_t size);
static uint64_t get_file_offset(CephContext *cct,
const file_layout_t *layout, uint64_t objectno, uint64_t off);
/*
* helper to assemble a striped result
*/
class StripedReadResult {
// offset -> (data, intended length)
std::map<uint64_t, std::pair<ceph::buffer::list, uint64_t> > partial;
uint64_t total_intended_len = 0; //sum of partial.second.second
public:
void add_partial_result(
CephContext *cct, ceph::buffer::list& bl,
const std::vector<std::pair<uint64_t,uint64_t> >& buffer_extents);
void add_partial_result(
CephContext *cct, ceph::buffer::list&& bl,
const striper::LightweightBufferExtents& buffer_extents);
/**
* add sparse read into results
*
* @param bl buffer
     * @param bl_map map of which logical source extents this covers
* @param bl_off logical buffer offset (e.g., first bl_map key
* if the buffer is not sparse)
* @param buffer_extents output buffer extents the data maps to
*/
void add_partial_sparse_result(
CephContext *cct, ceph::buffer::list& bl,
const std::map<uint64_t, uint64_t>& bl_map, uint64_t bl_off,
const std::vector<std::pair<uint64_t,uint64_t> >& buffer_extents);
void add_partial_sparse_result(
CephContext *cct, ceph::buffer::list&& bl,
const std::vector<std::pair<uint64_t, uint64_t>>& bl_map,
uint64_t bl_off,
const striper::LightweightBufferExtents& buffer_extents);
void assemble_result(CephContext *cct, ceph::buffer::list& bl,
bool zero_tail);
/**
     * @param buffer copy read data into buffer
     * @param len the length of buffer
*/
void assemble_result(CephContext *cct, char *buffer, size_t len);
uint64_t assemble_result(CephContext *cct,
std::map<uint64_t, uint64_t> *extent_map,
ceph::buffer::list *bl);
};
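  // Typical use (illustrative): feed each completed object read to
  // add_partial_result()/add_partial_sparse_result(), then call
  // assemble_result() once to stitch the pieces back into file order.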
};
//};
#endif
| 4,429 | 32.308271 | 77 | h |
null | ceph-main/src/osdc/StriperTypes.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OSDC_STRIPER_TYPES_H
#define CEPH_OSDC_STRIPER_TYPES_H
#include "include/types.h"
#include <boost/container/small_vector.hpp>
#include <ios>
#include <utility>
namespace striper {
// off -> len extents in (striped) buffer being mapped
typedef std::pair<uint64_t,uint64_t> BufferExtent;
typedef boost::container::small_vector<
BufferExtent, 4> LightweightBufferExtents;
struct LightweightObjectExtent {
LightweightObjectExtent() = delete;
LightweightObjectExtent(uint64_t object_no, uint64_t offset,
uint64_t length, uint64_t truncate_size)
: object_no(object_no), offset(offset), length(length),
truncate_size(truncate_size) {
}
uint64_t object_no;
uint64_t offset; // in-object
uint64_t length; // in-object
uint64_t truncate_size; // in-object
LightweightBufferExtents buffer_extents;
};
typedef boost::container::small_vector<
LightweightObjectExtent, 4> LightweightObjectExtents;
inline std::ostream& operator<<(std::ostream& os,
const LightweightObjectExtent& ex) {
return os << "extent("
<< ex.object_no << " "
<< ex.offset << "~" << ex.length
<< " -> " << ex.buffer_extents
<< ")";
}
} // namespace striper
#endif // CEPH_OSDC_STRIPER_TYPES_H
| 1,427 | 28.142857 | 70 | h |
null | ceph-main/src/osdc/WritebackHandler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OSDC_WRITEBACKHANDLER_H
#define CEPH_OSDC_WRITEBACKHANDLER_H
#include "include/Context.h"
#include "include/types.h"
#include "common/zipkin_trace.h"
#include "osd/osd_types.h"
class WritebackHandler {
public:
WritebackHandler() {}
virtual ~WritebackHandler() {}
virtual void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, ceph::buffer::list *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace, Context *onfinish) = 0;
/**
* check if a given extent read result may change due to a write
*
* Check if the content we see at the given read offset may change
* due to a write to this object.
*
* @param oid object
* @param read_off read offset
* @param read_len read length
* @param snapid read snapid
*/
virtual bool may_copy_on_write(const object_t& oid, uint64_t read_off,
uint64_t read_len, snapid_t snapid) = 0;
virtual ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc,
const ceph::buffer::list &bl, ceph::real_time mtime,
uint64_t trunc_size, __u32 trunc_seq,
ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) = 0;
virtual void overwrite_extent(const object_t& oid, uint64_t off, uint64_t len,
ceph_tid_t original_journal_tid,
ceph_tid_t new_journal_tid) {}
virtual bool can_scattered_write() { return false; }
virtual ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
std::vector<std::pair<uint64_t, ceph::buffer::list> >& io_vec,
const SnapContext& snapc, ceph::real_time mtime,
uint64_t trunc_size, __u32 trunc_seq,
Context *oncommit) {
return 0;
}
};
#endif
| 2,106 | 35.327586 | 80 | h |
null | ceph-main/src/osdc/error_code.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/system/error_code.hpp>
#include "include/rados.h"
const boost::system::error_category& osdc_category() noexcept;
enum class osdc_errc {
pool_dne = 1,
pool_exists,
// Come the revolution, we'll just kill your program. Maybe.
precondition_violated,
not_supported,
snapshot_exists,
snapshot_dne,
timed_out,
pool_eio
};
namespace boost::system {
template<>
struct is_error_code_enum<::osdc_errc> {
static const bool value = true;
};
template<>
struct is_error_condition_enum<::osdc_errc> {
static const bool value = false;
};
}
// implicit conversion:
inline boost::system::error_code make_error_code(osdc_errc e) noexcept {
return { static_cast<int>(e), osdc_category() };
}
// explicit conversion:
inline boost::system::error_condition make_error_condition(osdc_errc e) noexcept {
return { static_cast<int>(e), osdc_category() };
}
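// Thanks to the is_error_code_enum specialisation above, an osdc_errc value
// converts implicitly (illustrative):
//
//   boost::system::error_code ec = osdc_errc::pool_dne;
//   if (ec == osdc_errc::pool_dne) { /* pool does not exist */ }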
| 1,363 | 22.929825 | 82 | h |
null | ceph-main/src/perfglue/cpu_profiler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_PERFGLUE_CPU_PROFILER
/*
* Ceph glue for the Google Perftools CPU profiler
*/
#include <string>
#include <vector>
void cpu_profiler_handle_command(const std::vector<std::string> &cmd,
std::ostream& out);
#endif
| 642 | 23.730769 | 70 | h |
null | ceph-main/src/perfglue/heap_profiler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network/Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef HEAP_PROFILER_H_
#define HEAP_PROFILER_H_
#include <string>
#include <vector>
#include "common/config.h"
class LogClient;
/*
* Ceph glue for the Google perftools heap profiler, included
* as part of tcmalloc. This replaces ugly function pointers
* and #ifdef hacks!
*/
bool ceph_using_tcmalloc();
/*
* Configure the heap profiler
*/
void ceph_heap_profiler_init();
void ceph_heap_profiler_stats(char *buf, int length);
void ceph_heap_release_free_memory();
double ceph_heap_get_release_rate();
void ceph_heap_get_release_rate(double value);
bool ceph_heap_profiler_running();
void ceph_heap_profiler_start();
void ceph_heap_profiler_stop();
void ceph_heap_profiler_dump(const char *reason);
bool ceph_heap_get_numeric_property(const char *property, size_t *value);
bool ceph_heap_set_numeric_property(const char *property, size_t value);
void ceph_heap_profiler_handle_command(const std::vector<std::string> &cmd,
std::ostream& out);
#endif /* HEAP_PROFILER_H_ */
| 1,468 | 24.327586 | 75 | h |
null | ceph-main/src/rbd_replay/ActionTypes.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_REPLAY_ACTION_TYPES_H
#define CEPH_RBD_REPLAY_ACTION_TYPES_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include <iosfwd>
#include <list>
#include <string>
#include <vector>
#include <boost/variant/variant.hpp>
namespace ceph { class Formatter; }
namespace rbd_replay {
namespace action {
typedef uint64_t imagectx_id_t;
typedef uint64_t thread_id_t;
/// Even IDs are normal actions, odd IDs are completions.
typedef uint32_t action_id_t;
static const std::string BANNER("rbd-replay-trace");
/**
* Dependencies link actions to earlier actions or completions.
* If an action has a dependency \c d then it waits until \c d.time_delta
* nanoseconds after the action or completion with ID \c d.id has fired.
*/
struct Dependency {
/// ID of the action or completion to wait for.
action_id_t id;
/// Nanoseconds of delay to wait until after the action or completion fires.
uint64_t time_delta;
/**
* @param id ID of the action or completion to wait for.
* @param time_delta Nanoseconds of delay to wait after the action or
* completion fires.
*/
Dependency() : id(0), time_delta(0) {
}
Dependency(action_id_t id, uint64_t time_delta)
: id(id), time_delta(time_delta) {
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &it);
void decode(__u8 version, bufferlist::const_iterator &it);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<Dependency *> &o);
};
WRITE_CLASS_ENCODER(Dependency);
typedef std::vector<Dependency> Dependencies;
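// Example (illustrative): Dependency(6, 500000) makes an action wait until
// 500 microseconds (500000 ns) after the action or completion with id 6 has
// fired.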
enum ActionType {
ACTION_TYPE_START_THREAD = 0,
ACTION_TYPE_STOP_THREAD = 1,
ACTION_TYPE_READ = 2,
ACTION_TYPE_WRITE = 3,
ACTION_TYPE_AIO_READ = 4,
ACTION_TYPE_AIO_WRITE = 5,
ACTION_TYPE_OPEN_IMAGE = 6,
ACTION_TYPE_CLOSE_IMAGE = 7,
ACTION_TYPE_AIO_OPEN_IMAGE = 8,
ACTION_TYPE_AIO_CLOSE_IMAGE = 9,
ACTION_TYPE_DISCARD = 10,
ACTION_TYPE_AIO_DISCARD = 11
};
struct ActionBase {
action_id_t id;
thread_id_t thread_id;
Dependencies dependencies;
ActionBase() : id(0), thread_id(0) {
}
ActionBase(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies)
: id(id), thread_id(thread_id), dependencies(dependencies) {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
struct StartThreadAction : public ActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_START_THREAD;
StartThreadAction() {
}
StartThreadAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies)
: ActionBase(id, thread_id, dependencies) {
}
};
struct StopThreadAction : public ActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_STOP_THREAD;
StopThreadAction() {
}
StopThreadAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies)
: ActionBase(id, thread_id, dependencies) {
}
};
struct ImageActionBase : public ActionBase {
imagectx_id_t imagectx_id;
ImageActionBase() : imagectx_id(0) {
}
ImageActionBase(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id)
: ActionBase(id, thread_id, dependencies), imagectx_id(imagectx_id) {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
struct IoActionBase : public ImageActionBase {
uint64_t offset;
uint64_t length;
IoActionBase() : offset(0), length(0) {
}
IoActionBase(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
uint64_t offset, uint64_t length)
: ImageActionBase(id, thread_id, dependencies, imagectx_id),
offset(offset), length(length) {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
struct ReadAction : public IoActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_READ;
ReadAction() {
}
ReadAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
uint64_t offset, uint64_t length)
: IoActionBase(id, thread_id, dependencies, imagectx_id, offset, length) {
}
};
struct WriteAction : public IoActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_WRITE;
WriteAction() {
}
WriteAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
uint64_t offset, uint64_t length)
: IoActionBase(id, thread_id, dependencies, imagectx_id, offset, length) {
}
};
struct DiscardAction : public IoActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_DISCARD;
DiscardAction() {
}
DiscardAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
uint64_t offset, uint64_t length)
: IoActionBase(id, thread_id, dependencies, imagectx_id, offset, length) {
}
};
struct AioReadAction : public IoActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_AIO_READ;
AioReadAction() {
}
AioReadAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
uint64_t offset, uint64_t length)
: IoActionBase(id, thread_id, dependencies, imagectx_id, offset, length) {
}
};
struct AioWriteAction : public IoActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_AIO_WRITE;
AioWriteAction() {
}
AioWriteAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
uint64_t offset, uint64_t length)
: IoActionBase(id, thread_id, dependencies, imagectx_id, offset, length) {
}
};
struct AioDiscardAction : public IoActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_AIO_DISCARD;
AioDiscardAction() {
}
AioDiscardAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
uint64_t offset, uint64_t length)
: IoActionBase(id, thread_id, dependencies, imagectx_id, offset, length) {
}
};
struct OpenImageAction : public ImageActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_OPEN_IMAGE;
std::string name;
std::string snap_name;
bool read_only;
OpenImageAction() : read_only(false) {
}
OpenImageAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
const std::string &name, const std::string &snap_name,
bool read_only)
: ImageActionBase(id, thread_id, dependencies, imagectx_id),
name(name), snap_name(snap_name), read_only(read_only) {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
struct CloseImageAction : public ImageActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_CLOSE_IMAGE;
CloseImageAction() {
}
CloseImageAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id)
: ImageActionBase(id, thread_id, dependencies, imagectx_id) {
}
};
struct AioOpenImageAction : public ImageActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_AIO_OPEN_IMAGE;
std::string name;
std::string snap_name;
bool read_only;
AioOpenImageAction() : read_only(false) {
}
AioOpenImageAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id,
const std::string &name, const std::string &snap_name,
bool read_only)
: ImageActionBase(id, thread_id, dependencies, imagectx_id),
name(name), snap_name(snap_name), read_only(read_only) {
}
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
struct AioCloseImageAction : public ImageActionBase {
static const ActionType ACTION_TYPE = ACTION_TYPE_AIO_CLOSE_IMAGE;
AioCloseImageAction() {
}
AioCloseImageAction(action_id_t id, thread_id_t thread_id,
const Dependencies &dependencies, imagectx_id_t imagectx_id)
: ImageActionBase(id, thread_id, dependencies, imagectx_id) {
}
};
struct UnknownAction {
static const ActionType ACTION_TYPE = static_cast<ActionType>(-1);
void encode(bufferlist &bl) const;
void decode(__u8 version, bufferlist::const_iterator &it);
void dump(Formatter *f) const;
};
typedef boost::variant<StartThreadAction,
StopThreadAction,
ReadAction,
WriteAction,
DiscardAction,
AioReadAction,
AioWriteAction,
AioDiscardAction,
OpenImageAction,
CloseImageAction,
AioOpenImageAction,
AioCloseImageAction,
UnknownAction> Action;
class ActionEntry {
public:
Action action;
ActionEntry() : action(UnknownAction()) {
}
ActionEntry(const Action &action) : action(action) {
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &it);
void decode_unversioned(bufferlist::const_iterator &it);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<ActionEntry *> &o);
private:
void decode_versioned(__u8 version, bufferlist::const_iterator &it);
};
WRITE_CLASS_ENCODER(ActionEntry);
std::ostream &operator<<(std::ostream &out,
const rbd_replay::action::ActionType &type);
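// Illustrative sketch (not part of the original header): wrapping a concrete
// action in an ActionEntry and round-tripping it through the encoder
// generated by WRITE_CLASS_ENCODER(ActionEntry). The ids, dependency and I/O
// values are made up; a real trace writer derives them from the capture.
inline void action_entry_roundtrip_example() {
  Dependencies deps = { Dependency(2, 500) };       // wait 500 ns after id 2
  AioReadAction read(4, 1, deps, 1 /*imagectx*/, 0 /*offset*/, 4096 /*length*/);
  ActionEntry in(read);
  bufferlist bl;
  encode(in, bl);                                   // versioned on-disk form
  ActionEntry out;
  auto it = bl.cbegin();
  decode(out, it);                                  // out.action holds an AioReadAction
}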
} // namespace action
} // namespace rbd_replay
#endif // CEPH_RBD_REPLAY_ACTION_TYPES_H
| 10,290 | 29.267647 | 79 | h |
null | ceph-main/src/rbd_replay/BufferReader.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_RBD_REPLAY_BUFFER_READER_H
#define CEPH_RBD_REPLAY_BUFFER_READER_H
#include "include/int_types.h"
#include "include/buffer.h"
namespace rbd_replay {
class BufferReader {
public:
static const size_t DEFAULT_MIN_BYTES = 1<<20;
static const size_t DEFAULT_MAX_BYTES = 1<<22;
BufferReader(int fd, size_t min_bytes = DEFAULT_MIN_BYTES,
size_t max_bytes = DEFAULT_MAX_BYTES);
int fetch(bufferlist::const_iterator **it);
private:
int m_fd;
size_t m_min_bytes;
size_t m_max_bytes;
bufferlist m_bl;
bufferlist::const_iterator m_bl_it;
bool m_eof_reached;
};
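// Illustrative sketch (not part of the original header): one plausible way a
// trace decoder could drive BufferReader. fetch() is assumed to return a
// negative error code on failure and to leave *it positioned at the next
// unread byte; those semantics live in BufferReader.cc, so treat this as an
// assumption-laden example only.
inline int peek_trace(int fd) {
  BufferReader reader(fd);                 // default 1 MiB / 4 MiB window
  bufferlist::const_iterator *it = nullptr;
  int r = reader.fetch(&it);               // refill the buffer, get a cursor
  if (r < 0) {
    return r;                              // propagate the read error
  }
  // ... decode rbd_replay::action::ActionEntry objects from *it here ...
  return 0;
}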
} // namespace rbd_replay
#endif // CEPH_RBD_REPLAY_BUFFER_READER_H
| 773 | 21.114286 | 70 | h |
null | ceph-main/src/rgw/rgw_acl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <map>
#include <string>
#include <string_view>
#include <include/types.h>
#include <boost/optional.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include "common/debug.h"
#include "rgw_basic_types.h" //includes rgw_acl_types.h
class ACLGrant
{
protected:
ACLGranteeType type;
rgw_user id;
std::string email;
mutable rgw_user email_id;
ACLPermission permission;
std::string name;
ACLGroupTypeEnum group;
std::string url_spec;
public:
ACLGrant() : group(ACL_GROUP_NONE) {}
virtual ~ACLGrant() {}
/* there's an assumption here that email/uri/id encodings are
different and there can't be any overlap */
bool get_id(rgw_user& _id) const {
switch(type.get_type()) {
case ACL_TYPE_EMAIL_USER:
_id = email; // rgw_user assignment from a string calls from_str(), which parses the 't:u' (tenant:user) syntax
return true;
case ACL_TYPE_GROUP:
case ACL_TYPE_REFERER:
return false;
default:
_id = id;
return true;
}
}
const rgw_user* get_id() const {
switch(type.get_type()) {
case ACL_TYPE_EMAIL_USER:
email_id.from_str(email);
return &email_id;
case ACL_TYPE_GROUP:
case ACL_TYPE_REFERER:
return nullptr;
default:
return &id;
}
}
ACLGranteeType& get_type() { return type; }
const ACLGranteeType& get_type() const { return type; }
ACLPermission& get_permission() { return permission; }
const ACLPermission& get_permission() const { return permission; }
ACLGroupTypeEnum get_group() const { return group; }
const std::string& get_referer() const { return url_spec; }
void encode(bufferlist& bl) const {
ENCODE_START(5, 3, bl);
encode(type, bl);
std::string s;
id.to_str(s);
encode(s, bl);
std::string uri;
encode(uri, bl);
encode(email, bl);
encode(permission, bl);
encode(name, bl);
__u32 g = (__u32)group;
encode(g, bl);
encode(url_spec, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(5, 3, 3, bl);
decode(type, bl);
std::string s;
decode(s, bl);
id.from_str(s);
std::string uri;
decode(uri, bl);
decode(email, bl);
decode(permission, bl);
decode(name, bl);
if (struct_v > 1) {
__u32 g;
decode(g, bl);
group = (ACLGroupTypeEnum)g;
} else {
group = uri_to_group(uri);
}
if (struct_v >= 5) {
decode(url_spec, bl);
} else {
url_spec.clear();
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<ACLGrant*>& o);
ACLGroupTypeEnum uri_to_group(std::string& uri);
void set_canon(const rgw_user& _id, const std::string& _name, const uint32_t perm) {
type.set(ACL_TYPE_CANON_USER);
id = _id;
name = _name;
permission.set_permissions(perm);
}
void set_group(ACLGroupTypeEnum _group, const uint32_t perm) {
type.set(ACL_TYPE_GROUP);
group = _group;
permission.set_permissions(perm);
}
void set_referer(const std::string& _url_spec, const uint32_t perm) {
type.set(ACL_TYPE_REFERER);
url_spec = _url_spec;
permission.set_permissions(perm);
}
friend bool operator==(const ACLGrant& lhs, const ACLGrant& rhs);
friend bool operator!=(const ACLGrant& lhs, const ACLGrant& rhs);
};
WRITE_CLASS_ENCODER(ACLGrant)
struct ACLReferer {
std::string url_spec;
uint32_t perm;
ACLReferer() : perm(0) {}
ACLReferer(const std::string& url_spec,
const uint32_t perm)
: url_spec(url_spec),
perm(perm) {
}
bool is_match(std::string_view http_referer) const {
const auto http_host = get_http_host(http_referer);
if (!http_host || http_host->length() < url_spec.length()) {
return false;
}
if ("*" == url_spec) {
return true;
}
if (http_host->compare(url_spec) == 0) {
return true;
}
if ('.' == url_spec[0]) {
/* Wildcard support: a referer matches the spec when its trailing
 * characters are exactly equal to the spec. */
return boost::algorithm::ends_with(http_host.value(), url_spec);
}
return false;
}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(url_spec, bl);
encode(perm, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl);
decode(url_spec, bl);
decode(perm, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
friend bool operator==(const ACLReferer& lhs, const ACLReferer& rhs);
friend bool operator!=(const ACLReferer& lhs, const ACLReferer& rhs);
private:
boost::optional<std::string_view> get_http_host(const std::string_view url) const {
size_t pos = url.find("://");
if (pos == std::string_view::npos || boost::algorithm::starts_with(url, "://") ||
boost::algorithm::ends_with(url, "://") || boost::algorithm::ends_with(url, "@")) {
return boost::none;
}
std::string_view url_sub = url.substr(pos + strlen("://"));
pos = url_sub.find('@');
if (pos != std::string_view::npos) {
url_sub = url_sub.substr(pos + 1);
}
pos = url_sub.find_first_of("/:");
if (pos == std::string_view::npos) {
/* no port or path exists */
return url_sub;
}
return url_sub.substr(0, pos);
}
};
WRITE_CLASS_ENCODER(ACLReferer)
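// Illustrative sketch (not part of the original header): how the matching
// rules above behave for the three url_spec forms. The host names are made
// up, and RGW_PERM_READ comes from rgw_acl_types.h (pulled in through
// rgw_basic_types.h).
inline bool referer_match_examples() {
  ACLReferer any("*", RGW_PERM_READ);               // matches every referer
  ACLReferer exact("example.com", RGW_PERM_READ);   // host must match exactly
  ACLReferer sub(".example.com", RGW_PERM_READ);    // leading '.' => suffix match
  return any.is_match("http://foo.bar/") &&
         exact.is_match("https://example.com/index.html") &&
         sub.is_match("https://img.example.com/logo.png") &&
         !sub.is_match("https://example.org/");
}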
namespace rgw {
namespace auth {
class Identity;
}
}
using ACLGrantMap = std::multimap<std::string, ACLGrant>;
class RGWAccessControlList
{
protected:
CephContext *cct;
/* FIXME: in the future we should consider switching to uint32_t in the
 * data structures as well. */
std::map<std::string, int> acl_user_map;
std::map<uint32_t, int> acl_group_map;
std::list<ACLReferer> referer_list;
ACLGrantMap grant_map;
void _add_grant(ACLGrant *grant);
public:
explicit RGWAccessControlList(CephContext *_cct) : cct(_cct) {}
RGWAccessControlList() : cct(NULL) {}
void set_ctx(CephContext *ctx) {
cct = ctx;
}
virtual ~RGWAccessControlList() {}
uint32_t get_perm(const DoutPrefixProvider* dpp,
const rgw::auth::Identity& auth_identity,
uint32_t perm_mask);
uint32_t get_group_perm(const DoutPrefixProvider *dpp, ACLGroupTypeEnum group, uint32_t perm_mask) const;
uint32_t get_referer_perm(const DoutPrefixProvider *dpp, uint32_t current_perm,
std::string http_referer,
uint32_t perm_mask);
void encode(bufferlist& bl) const {
ENCODE_START(4, 3, bl);
bool maps_initialized = true;
encode(maps_initialized, bl);
encode(acl_user_map, bl);
encode(grant_map, bl);
encode(acl_group_map, bl);
encode(referer_list, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(4, 3, 3, bl);
bool maps_initialized;
decode(maps_initialized, bl);
decode(acl_user_map, bl);
decode(grant_map, bl);
if (struct_v >= 2) {
decode(acl_group_map, bl);
} else if (!maps_initialized) {
ACLGrantMap::iterator iter;
for (iter = grant_map.begin(); iter != grant_map.end(); ++iter) {
ACLGrant& grant = iter->second;
_add_grant(&grant);
}
}
if (struct_v >= 4) {
decode(referer_list, bl);
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<RGWAccessControlList*>& o);
void add_grant(ACLGrant *grant);
void remove_canon_user_grant(rgw_user& user_id);
ACLGrantMap& get_grant_map() { return grant_map; }
const ACLGrantMap& get_grant_map() const { return grant_map; }
void create_default(const rgw_user& id, std::string name) {
acl_user_map.clear();
acl_group_map.clear();
referer_list.clear();
ACLGrant grant;
grant.set_canon(id, name, RGW_PERM_FULL_CONTROL);
add_grant(&grant);
}
friend bool operator==(const RGWAccessControlList& lhs, const RGWAccessControlList& rhs);
friend bool operator!=(const RGWAccessControlList& lhs, const RGWAccessControlList& rhs);
};
WRITE_CLASS_ENCODER(RGWAccessControlList)
class ACLOwner
{
protected:
rgw_user id;
std::string display_name;
public:
ACLOwner() {}
ACLOwner(const rgw_user& _id) : id(_id) {}
~ACLOwner() {}
void encode(bufferlist& bl) const {
ENCODE_START(3, 2, bl);
std::string s;
id.to_str(s);
encode(s, bl);
encode(display_name, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl);
std::string s;
decode(s, bl);
id.from_str(s);
decode(display_name, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<ACLOwner*>& o);
void set_id(const rgw_user& _id) { id = _id; }
void set_name(const std::string& name) { display_name = name; }
rgw_user& get_id() { return id; }
const rgw_user& get_id() const { return id; }
std::string& get_display_name() { return display_name; }
const std::string& get_display_name() const { return display_name; }
friend bool operator==(const ACLOwner& lhs, const ACLOwner& rhs);
friend bool operator!=(const ACLOwner& lhs, const ACLOwner& rhs);
};
WRITE_CLASS_ENCODER(ACLOwner)
class RGWAccessControlPolicy
{
protected:
CephContext *cct;
RGWAccessControlList acl;
ACLOwner owner;
public:
explicit RGWAccessControlPolicy(CephContext *_cct) : cct(_cct), acl(_cct) {}
RGWAccessControlPolicy() : cct(NULL), acl(NULL) {}
virtual ~RGWAccessControlPolicy() {}
void set_ctx(CephContext *ctx) {
cct = ctx;
acl.set_ctx(ctx);
}
uint32_t get_perm(const DoutPrefixProvider* dpp,
const rgw::auth::Identity& auth_identity,
uint32_t perm_mask,
const char * http_referer,
bool ignore_public_acls=false);
bool verify_permission(const DoutPrefixProvider* dpp,
const rgw::auth::Identity& auth_identity,
uint32_t user_perm_mask,
uint32_t perm,
const char * http_referer = nullptr,
bool ignore_public_acls=false);
void encode(bufferlist& bl) const {
ENCODE_START(2, 2, bl);
encode(owner, bl);
encode(acl, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(owner, bl);
decode(acl, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<RGWAccessControlPolicy*>& o);
void decode_owner(bufferlist::const_iterator& bl) { // sometimes we only need the owner, and decoding just that is faster
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(owner, bl);
DECODE_FINISH(bl);
}
void set_owner(ACLOwner& o) { owner = o; }
ACLOwner& get_owner() {
return owner;
}
void create_default(const rgw_user& id, std::string& name) {
acl.create_default(id, name);
owner.set_id(id);
owner.set_name(name);
}
RGWAccessControlList& get_acl() {
return acl;
}
const RGWAccessControlList& get_acl() const {
return acl;
}
virtual bool compare_group_name(std::string& id, ACLGroupTypeEnum group) { return false; }
bool is_public(const DoutPrefixProvider *dpp) const;
friend bool operator==(const RGWAccessControlPolicy& lhs, const RGWAccessControlPolicy& rhs);
friend bool operator!=(const RGWAccessControlPolicy& lhs, const RGWAccessControlPolicy& rhs);
};
WRITE_CLASS_ENCODER(RGWAccessControlPolicy)
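// Illustrative sketch (not part of the original header): constructing the
// default "owner has FULL_CONTROL" policy that newly created buckets and
// objects receive, then serializing it with the encoder declared above. The
// user id and display name are hypothetical.
inline void default_policy_example(CephContext *cct) {
  RGWAccessControlPolicy policy(cct);
  rgw_user owner("alice");                     // hypothetical owner
  std::string display_name = "Alice Example";
  policy.create_default(owner, display_name);  // owner grant + owner metadata
  bufferlist bl;
  encode(policy, bl);                          // ready to store as an attr
}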
| 11,762 | 27.344578 | 107 | h |
null | ceph-main/src/rgw/rgw_acl_s3.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <map>
#include <string>
#include <iosfwd>
#include <include/types.h>
#include "include/str_list.h"
#include "rgw_xml.h"
#include "rgw_acl.h"
#include "rgw_sal_fwd.h"
class RGWUserCtl;
class ACLPermission_S3 : public ACLPermission, public XMLObj
{
public:
ACLPermission_S3() {}
virtual ~ACLPermission_S3() override {}
bool xml_end(const char *el) override;
void to_xml(std::ostream& out);
};
class ACLGrantee_S3 : public ACLGrantee, public XMLObj
{
public:
ACLGrantee_S3() {}
virtual ~ACLGrantee_S3() override {}
bool xml_start(const char *el, const char **attr);
};
class ACLGrant_S3 : public ACLGrant, public XMLObj
{
public:
ACLGrant_S3() {}
virtual ~ACLGrant_S3() override {}
void to_xml(CephContext *cct, std::ostream& out);
bool xml_end(const char *el) override;
bool xml_start(const char *el, const char **attr);
static ACLGroupTypeEnum uri_to_group(std::string& uri);
static bool group_to_uri(ACLGroupTypeEnum group, std::string& uri);
};
class RGWAccessControlList_S3 : public RGWAccessControlList, public XMLObj
{
public:
explicit RGWAccessControlList_S3(CephContext *_cct) : RGWAccessControlList(_cct) {}
virtual ~RGWAccessControlList_S3() override {}
bool xml_end(const char *el) override;
void to_xml(std::ostream& out);
int create_canned(ACLOwner& owner, ACLOwner& bucket_owner, const std::string& canned_acl);
int create_from_grants(std::list<ACLGrant>& grants);
};
class ACLOwner_S3 : public ACLOwner, public XMLObj
{
public:
ACLOwner_S3() {}
virtual ~ACLOwner_S3() override {}
bool xml_end(const char *el) override;
void to_xml(std::ostream& out);
};
class RGWEnv;
class RGWAccessControlPolicy_S3 : public RGWAccessControlPolicy, public XMLObj
{
public:
explicit RGWAccessControlPolicy_S3(CephContext *_cct) : RGWAccessControlPolicy(_cct) {}
virtual ~RGWAccessControlPolicy_S3() override {}
bool xml_end(const char *el) override;
void to_xml(std::ostream& out);
int rebuild(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, ACLOwner *owner,
RGWAccessControlPolicy& dest, std::string &err_msg);
bool compare_group_name(std::string& id, ACLGroupTypeEnum group) override;
virtual int create_canned(ACLOwner& _owner, ACLOwner& bucket_owner, const std::string& canned_acl) {
RGWAccessControlList_S3& _acl = static_cast<RGWAccessControlList_S3 &>(acl);
if (_owner.get_id() == rgw_user("anonymous")) {
owner = bucket_owner;
} else {
owner = _owner;
}
int ret = _acl.create_canned(owner, bucket_owner, canned_acl);
return ret;
}
int create_from_headers(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
const RGWEnv *env, ACLOwner& _owner);
};
/**
* Interfaces with the webserver's XML handling code
* to parse ACL documents in a way that makes sense for RGW.
*/
class RGWACLXMLParser_S3 : public RGWXMLParser
{
CephContext *cct;
XMLObj *alloc_obj(const char *el) override;
public:
explicit RGWACLXMLParser_S3(CephContext *_cct) : cct(_cct) {}
};
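// Illustrative sketch (not part of the original header): the usual pattern
// for turning an S3 ACL XML body into a policy object. init(), parse() and
// find_first() are inherited from the XML parser machinery in rgw_xml.h;
// their exact signatures are assumed here, so check that header before
// relying on this.
inline bool parse_acl_xml_example(CephContext *cct, const char *data, int len)
{
  RGWACLXMLParser_S3 parser(cct);
  if (!parser.init()) {
    return false;                              // parser setup failed
  }
  if (!parser.parse(data, len, 1 /*done*/)) {
    return false;                              // malformed XML
  }
  // the parser owns the parsed tree; copy out whatever is needed before it
  // goes out of scope
  auto *policy = static_cast<RGWAccessControlPolicy_S3 *>(
      parser.find_first("AccessControlPolicy"));
  return policy != nullptr;
}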
| 3,136 | 26.043103 | 102 | h |
null | ceph-main/src/rgw/rgw_acl_swift.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <map>
#include <vector>
#include <string>
#include <include/types.h>
#include <boost/optional.hpp>
#include "rgw_acl.h"
class RGWUserCtl;
class RGWAccessControlPolicy_SWIFT : public RGWAccessControlPolicy
{
int add_grants(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
const std::vector<std::string>& uids,
uint32_t perm);
public:
explicit RGWAccessControlPolicy_SWIFT(CephContext* const cct)
: RGWAccessControlPolicy(cct) {
}
~RGWAccessControlPolicy_SWIFT() override = default;
int create(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
const rgw_user& id,
const std::string& name,
const char* read_list,
const char* write_list,
uint32_t& rw_mask);
void filter_merge(uint32_t mask, RGWAccessControlPolicy_SWIFT *policy);
void to_str(std::string& read, std::string& write);
};
class RGWAccessControlPolicy_SWIFTAcct : public RGWAccessControlPolicy
{
public:
explicit RGWAccessControlPolicy_SWIFTAcct(CephContext * const cct)
: RGWAccessControlPolicy(cct) {
}
~RGWAccessControlPolicy_SWIFTAcct() override {}
void add_grants(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
const std::vector<std::string>& uids,
uint32_t perm);
bool create(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
const rgw_user& id,
const std::string& name,
const std::string& acl_str);
boost::optional<std::string> to_str() const;
};
| 1,709 | 27.983051 | 73 | h |
null | ceph-main/src/rgw/rgw_acl_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* N.B., this header defines fundamental serialized types. Do not
* introduce changes or include files which can only be compiled in
* radosgw or OSD contexts (e.g., rgw_sal.h, rgw_common.h)
*/
#pragma once
#include <string>
#include <list>
#include <fmt/format.h>
#include "include/types.h"
#include "common/Formatter.h"
#define RGW_PERM_NONE 0x00
#define RGW_PERM_READ 0x01
#define RGW_PERM_WRITE 0x02
#define RGW_PERM_READ_ACP 0x04
#define RGW_PERM_WRITE_ACP 0x08
#define RGW_PERM_READ_OBJS 0x10
#define RGW_PERM_WRITE_OBJS 0x20
#define RGW_PERM_FULL_CONTROL ( RGW_PERM_READ | RGW_PERM_WRITE | \
RGW_PERM_READ_ACP | RGW_PERM_WRITE_ACP )
#define RGW_PERM_ALL_S3 RGW_PERM_FULL_CONTROL
#define RGW_PERM_INVALID 0xFF00
static constexpr char RGW_REFERER_WILDCARD[] = "*";
struct RGWAccessKey {
std::string id; // AccessKey
std::string key; // SecretKey
std::string subuser;
RGWAccessKey() {}
RGWAccessKey(std::string _id, std::string _key)
: id(std::move(_id)), key(std::move(_key)) {}
void encode(bufferlist& bl) const {
ENCODE_START(2, 2, bl);
encode(id, bl);
encode(key, bl);
encode(subuser, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN_32(2, 2, 2, bl);
decode(id, bl);
decode(key, bl);
decode(subuser, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
void dump_plain(Formatter *f) const;
void dump(Formatter *f, const std::string& user, bool swift) const;
static void generate_test_instances(std::list<RGWAccessKey*>& o);
void decode_json(JSONObj *obj);
void decode_json(JSONObj *obj, bool swift);
};
WRITE_CLASS_ENCODER(RGWAccessKey)
struct RGWSubUser {
std::string name;
uint32_t perm_mask;
RGWSubUser() : perm_mask(0) {}
void encode(bufferlist& bl) const {
ENCODE_START(2, 2, bl);
encode(name, bl);
encode(perm_mask, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN_32(2, 2, 2, bl);
decode(name, bl);
decode(perm_mask, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
void dump(Formatter *f, const std::string& user) const;
static void generate_test_instances(std::list<RGWSubUser*>& o);
void decode_json(JSONObj *obj);
};
WRITE_CLASS_ENCODER(RGWSubUser)
class RGWUserCaps
{
std::map<std::string, uint32_t> caps;
int get_cap(const std::string& cap, std::string& type, uint32_t *perm);
int add_cap(const std::string& cap);
int remove_cap(const std::string& cap);
public:
static int parse_cap_perm(const std::string& str, uint32_t *perm);
int add_from_string(const std::string& str);
int remove_from_string(const std::string& str);
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(caps, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(caps, bl);
DECODE_FINISH(bl);
}
int check_cap(const std::string& cap, uint32_t perm) const;
bool is_valid_cap_type(const std::string& tp);
void dump(Formatter *f) const;
void dump(Formatter *f, const char *name) const;
void decode_json(JSONObj *obj);
};
WRITE_CLASS_ENCODER(RGWUserCaps)
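// Illustrative sketch (not part of the original header): admin caps are
// usually supplied in the same "type=perm[,perm];type=perm" form accepted by
// radosgw-admin's --caps option. The exact grammar and return values are
// implemented in the .cc file, so the string and checks below are assumptions
// for illustration.
inline int user_caps_example(RGWUserCaps& caps) {
  int r = caps.add_from_string("users=read,write;buckets=*");
  if (r < 0) {
    return r;                                    // malformed cap string
  }
  return caps.check_cap("users", RGW_PERM_READ); // expected to be 0 when granted
}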
enum ACLGranteeTypeEnum {
/* numbers are encoded, should not change */
ACL_TYPE_CANON_USER = 0,
ACL_TYPE_EMAIL_USER = 1,
ACL_TYPE_GROUP = 2,
ACL_TYPE_UNKNOWN = 3,
ACL_TYPE_REFERER = 4,
};
enum ACLGroupTypeEnum {
/* numbers are encoded should not change */
ACL_GROUP_NONE = 0,
ACL_GROUP_ALL_USERS = 1,
ACL_GROUP_AUTHENTICATED_USERS = 2,
};
class ACLPermission
{
protected:
int flags;
public:
ACLPermission() : flags(0) {}
~ACLPermission() {}
uint32_t get_permissions() const { return flags; }
void set_permissions(uint32_t perm) { flags = perm; }
void encode(bufferlist& bl) const {
ENCODE_START(2, 2, bl);
encode(flags, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(flags, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<ACLPermission*>& o);
friend bool operator==(const ACLPermission& lhs, const ACLPermission& rhs);
friend bool operator!=(const ACLPermission& lhs, const ACLPermission& rhs);
};
WRITE_CLASS_ENCODER(ACLPermission)
class ACLGranteeType
{
protected:
__u32 type;
public:
ACLGranteeType() : type(ACL_TYPE_UNKNOWN) {}
virtual ~ACLGranteeType() {}
// virtual const char *to_string() = 0;
ACLGranteeTypeEnum get_type() const { return (ACLGranteeTypeEnum)type; }
void set(ACLGranteeTypeEnum t) { type = t; }
// virtual void set(const char *s) = 0;
void encode(bufferlist& bl) const {
ENCODE_START(2, 2, bl);
encode(type, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(type, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<ACLGranteeType*>& o);
friend bool operator==(const ACLGranteeType& lhs, const ACLGranteeType& rhs);
friend bool operator!=(const ACLGranteeType& lhs, const ACLGranteeType& rhs);
};
WRITE_CLASS_ENCODER(ACLGranteeType)
class ACLGrantee
{
public:
ACLGrantee() {}
~ACLGrantee() {}
};
| 5,909 | 26.616822 | 79 | h |
null | ceph-main/src/rgw/rgw_aio.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <cstdint>
#include <memory>
#include <type_traits>
#include <boost/intrusive/list.hpp>
#include "include/rados/librados_fwd.hpp"
#include "common/async/yield_context.h"
#include "rgw_common.h"
#include "include/function2.hpp"
struct D3nGetObjData;
namespace rgw {
struct AioResult {
rgw_raw_obj obj;
uint64_t id = 0; // id allows caller to associate a result with its request
bufferlist data; // result buffer for reads
int result = 0;
std::aligned_storage_t<3 * sizeof(void*)> user_data;
AioResult() = default;
AioResult(const AioResult&) = delete;
AioResult& operator =(const AioResult&) = delete;
AioResult(AioResult&&) = delete;
AioResult& operator =(AioResult&&) = delete;
};
struct AioResultEntry : AioResult, boost::intrusive::list_base_hook<> {
virtual ~AioResultEntry() {}
};
// a list of polymorphic entries that frees them on destruction
template <typename T, typename ...Args>
struct OwningList : boost::intrusive::list<T, Args...> {
OwningList() = default;
~OwningList() { this->clear_and_dispose(std::default_delete<T>{}); }
OwningList(OwningList&&) = default;
OwningList& operator=(OwningList&&) = default;
OwningList(const OwningList&) = delete;
OwningList& operator=(const OwningList&) = delete;
};
using AioResultList = OwningList<AioResultEntry>;
// returns the first error code or 0 if all succeeded
inline int check_for_errors(const AioResultList& results) {
for (auto& e : results) {
if (e.result < 0) {
return e.result;
}
}
return 0;
}
// interface to submit async librados operations and wait on their completions.
// each call returns a list of results from prior completions
class Aio {
public:
using OpFunc = fu2::unique_function<void(Aio*, AioResult&) &&>;
virtual ~Aio() {}
virtual AioResultList get(rgw_raw_obj obj,
OpFunc&& f,
uint64_t cost, uint64_t id) = 0;
virtual void put(AioResult& r) = 0;
// poll for any ready completions without waiting
virtual AioResultList poll() = 0;
// return any ready completions. if there are none, wait for the next
virtual AioResultList wait() = 0;
// wait for all outstanding completions and return their results
virtual AioResultList drain() = 0;
static OpFunc librados_op(librados::IoCtx ctx,
librados::ObjectReadOperation&& op,
optional_yield y);
static OpFunc librados_op(librados::IoCtx ctx,
librados::ObjectWriteOperation&& op,
optional_yield y);
static OpFunc d3n_cache_op(const DoutPrefixProvider *dpp, optional_yield y,
off_t read_ofs, off_t read_len, std::string& location);
};
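// Illustrative sketch (not part of the original header): the completion
// handling most callers end with: drain every outstanding op and fold the
// per-op results into a single error code. Submission is omitted because it
// needs a concrete Aio implementation (see rgw_aio_throttle.h).
inline int drain_and_check(Aio& aio) {
  AioResultList completed = aio.drain();   // wait for all outstanding ops
  return check_for_errors(completed);      // first error code, or 0
}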
} // namespace rgw
| 3,161 | 29.114286 | 84 | h |
null | ceph-main/src/rgw/rgw_aio_throttle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <memory>
#include "common/ceph_mutex.h"
#include "common/async/completion.h"
#include "common/async/yield_context.h"
#include "rgw_aio.h"
namespace rgw {
class Throttle {
protected:
const uint64_t window;
uint64_t pending_size = 0;
AioResultList pending;
AioResultList completed;
bool is_available() const { return pending_size <= window; }
bool has_completion() const { return !completed.empty(); }
bool is_drained() const { return pending.empty(); }
enum class Wait { None, Available, Completion, Drained };
Wait waiter = Wait::None;
bool waiter_ready() const;
public:
Throttle(uint64_t window) : window(window) {}
virtual ~Throttle() {
// must drain before destructing
ceph_assert(pending.empty());
ceph_assert(completed.empty());
}
};
// a throttle for aio operations. all public functions must be called from
// the same thread
class BlockingAioThrottle final : public Aio, private Throttle {
ceph::mutex mutex = ceph::make_mutex("AioThrottle");
ceph::condition_variable cond;
struct Pending : AioResultEntry {
BlockingAioThrottle *parent = nullptr;
uint64_t cost = 0;
};
public:
BlockingAioThrottle(uint64_t window) : Throttle(window) {}
virtual ~BlockingAioThrottle() override {};
AioResultList get(rgw_raw_obj obj, OpFunc&& f,
uint64_t cost, uint64_t id) override final;
void put(AioResult& r) override final;
AioResultList poll() override final;
AioResultList wait() override final;
AioResultList drain() override final;
};
// a throttle that yields the coroutine instead of blocking. all public
// functions must be called within the coroutine strand
class YieldingAioThrottle final : public Aio, private Throttle {
boost::asio::io_context& context;
yield_context yield;
struct Handler;
// completion callback associated with the waiter
using Completion = ceph::async::Completion<void(boost::system::error_code)>;
std::unique_ptr<Completion> completion;
template <typename CompletionToken>
auto async_wait(CompletionToken&& token);
struct Pending : AioResultEntry { uint64_t cost = 0; };
public:
YieldingAioThrottle(uint64_t window, boost::asio::io_context& context,
yield_context yield)
: Throttle(window), context(context), yield(yield)
{}
virtual ~YieldingAioThrottle() override {};
AioResultList get(rgw_raw_obj obj, OpFunc&& f,
uint64_t cost, uint64_t id) override final;
void put(AioResult& r) override final;
AioResultList poll() override final;
AioResultList wait() override final;
AioResultList drain() override final;
};
// return a smart pointer to Aio
inline auto make_throttle(uint64_t window_size, optional_yield y)
{
std::unique_ptr<Aio> aio;
if (y) {
aio = std::make_unique<YieldingAioThrottle>(window_size,
y.get_io_context(),
y.get_yield_context());
} else {
aio = std::make_unique<BlockingAioThrottle>(window_size);
}
return aio;
}
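// Illustrative sketch (not part of the original header): how a synchronous
// caller typically obtains and finishes with a throttle. Passing null_yield
// selects the blocking implementation; the 64 MiB window is an arbitrary
// example value, not a recommended default.
inline int throttled_drain_example() {
  auto aio = make_throttle(64 * 1024 * 1024, null_yield);
  // ... submit ops with aio->get(obj, Aio::librados_op(...), cost, id) ...
  AioResultList results = aio->drain();    // wait for everything in flight
  return check_for_errors(results);
}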
} // namespace rgw
| 3,530 | 25.954198 | 78 | h |
null | ceph-main/src/rgw/rgw_amqp.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <functional>
#include <boost/optional.hpp>
#include "include/common_fwd.h"
struct amqp_connection_info;
namespace rgw::amqp {
// the reply callback is expected to get an integer parameter
// indicating the result, and not to return anything
typedef std::function<void(int)> reply_callback_t;
// initialize the amqp manager
bool init(CephContext* cct);
// shutdown the amqp manager
void shutdown();
// key class for the connection list
struct connection_id_t {
std::string host;
int port;
std::string vhost;
std::string exchange;
bool ssl;
connection_id_t() = default;
connection_id_t(const amqp_connection_info& info, const std::string& _exchange);
};
std::string to_string(const connection_id_t& id);
// connect to an amqp endpoint
bool connect(connection_id_t& conn_id, const std::string& url, const std::string& exchange, bool mandatory_delivery, bool verify_ssl,
boost::optional<const std::string&> ca_location);
// publish a message over a connection that was already created
int publish(const connection_id_t& conn_id,
const std::string& topic,
const std::string& message);
// publish a message over a connection that was already created
// and pass a callback that will be invoked (async) when the broker confirms
// receiving the message
int publish_with_confirm(const connection_id_t& conn_id,
const std::string& topic,
const std::string& message,
reply_callback_t cb);
// convert the integer status returned from the "publish" function to a string
std::string status_to_string(int s);
// number of connections
size_t get_connection_count();
// return the number of messages that were sent
// to the broker, but were not yet acked/nacked/timed out
size_t get_inflight();
// running counter of successfully queued messages
size_t get_queued();
// running counter of dequeued messages
size_t get_dequeued();
// number of maximum allowed connections
size_t get_max_connections();
// number of maximum allowed inflight messages
size_t get_max_inflight();
// maximum number of messages in the queue
size_t get_max_queue();
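// Illustrative sketch (not part of the original header): the typical
// connect-then-publish flow. The URL, exchange and topic names are
// hypothetical, and error handling is reduced to returning the raw status
// that status_to_string() can translate.
inline int publish_example(CephContext* cct) {
  if (!init(cct)) {
    return -1;                                   // manager failed to start
  }
  connection_id_t conn;
  if (!connect(conn, "amqp://guest:guest@localhost:5672", "rgw-exchange",
               false /*mandatory_delivery*/, false /*verify_ssl*/,
               boost::none /*ca_location*/)) {
    return -1;                                   // bad URL or connection limit
  }
  return publish(conn, "notif-topic", "hello");  // queued for async delivery
}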
}
| 2,234 | 25.927711 | 133 | h |
null | ceph-main/src/rgw/rgw_arn.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <boost/optional.hpp>
class rgw_obj;
class rgw_bucket;
namespace rgw {
enum struct Partition {
aws, aws_cn, aws_us_gov, wildcard
// If we wanted our own ARNs for principal type unique to us
// (maybe to integrate better with Swift) or for anything else we
// provide that doesn't map onto S3, we could add an 'rgw'
// partition type.
};
enum struct Service {
apigateway, appstream, artifact, autoscaling, aws_portal, acm,
cloudformation, cloudfront, cloudhsm, cloudsearch, cloudtrail,
cloudwatch, events, logs, codebuild, codecommit, codedeploy,
codepipeline, cognito_idp, cognito_identity, cognito_sync,
config, datapipeline, dms, devicefarm, directconnect,
ds, dynamodb, ec2, ecr, ecs, ssm, elasticbeanstalk, elasticfilesystem,
elasticloadbalancing, elasticmapreduce, elastictranscoder, elasticache,
es, gamelift, glacier, health, iam, importexport, inspector, iot,
kms, kinesisanalytics, firehose, kinesis, lambda, lightsail,
machinelearning, aws_marketplace, aws_marketplace_management,
mobileanalytics, mobilehub, opsworks, opsworks_cm, polly,
redshift, rds, route53, route53domains, sts, servicecatalog,
ses, sns, sqs, s3, swf, sdb, states, storagegateway, support,
trustedadvisor, waf, workmail, workspaces, wildcard
};
/* valid format:
* 'arn:partition:service:region:account-id:resource'
* The 'resource' part can be further broken down via ARNResource
*/
struct ARN {
Partition partition;
Service service;
std::string region;
// Once we refit tenant, we should probably use that instead of a
// string.
std::string account;
std::string resource;
ARN()
: partition(Partition::wildcard), service(Service::wildcard) {}
ARN(Partition partition, Service service, std::string region,
std::string account, std::string resource)
: partition(partition), service(service), region(std::move(region)),
account(std::move(account)), resource(std::move(resource)) {}
ARN(const rgw_obj& o);
ARN(const rgw_bucket& b);
ARN(const rgw_bucket& b, const std::string& o);
ARN(const std::string& resource_name, const std::string& type, const std::string& tenant, bool has_path=false);
static boost::optional<ARN> parse(const std::string& s,
bool wildcard = false);
std::string to_string() const;
// `this` is the pattern
bool match(const ARN& candidate) const;
};
inline std::string to_string(const ARN& a) {
return a.to_string();
}
inline std::ostream& operator <<(std::ostream& m, const ARN& a) {
return m << to_string(a);
}
bool operator ==(const ARN& l, const ARN& r);
bool operator <(const ARN& l, const ARN& r);
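// Illustrative sketch (not part of the original header): parsing a bucket
// pattern and an object ARN, then using the wildcard-aware match(). The ARN
// strings are hypothetical and follow the
// 'arn:partition:service:region:account-id:resource' layout described above.
inline bool arn_match_example() {
  auto pattern = ARN::parse("arn:aws:s3:::mybucket/*", true /*wildcards*/);
  auto object = ARN::parse("arn:aws:s3:::mybucket/photos/cat.png");
  return pattern && object && pattern->match(*object);
}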
/* valid formats (only resource part):
* 'resource'
* 'resourcetype/resource'
* 'resourcetype/resource/qualifier'
* 'resourcetype/resource:qualifier'
* 'resourcetype:resource'
* 'resourcetype:resource:qualifier'
* Note that 'resourceType' cannot be wildcard
*/
struct ARNResource {
constexpr static const char* const wildcard = "*";
std::string resource_type;
std::string resource;
std::string qualifier;
ARNResource() : resource_type(""), resource(wildcard), qualifier("") {}
ARNResource(const std::string& _resource_type, const std::string& _resource, const std::string& _qualifier) :
resource_type(std::move(_resource_type)), resource(std::move(_resource)), qualifier(std::move(_qualifier)) {}
static boost::optional<ARNResource> parse(const std::string& s);
std::string to_string() const;
};
inline std::string to_string(const ARNResource& r) {
return r.to_string();
}
} // namespace rgw
namespace std {
template<>
struct hash<::rgw::Service> {
size_t operator()(const ::rgw::Service& s) const noexcept {
// Invoke a default-constructed hash object for int.
return hash<int>()(static_cast<int>(s));
}
};
} // namespace std
| 3,948 | 31.368852 | 113 | h |
null | ceph-main/src/rgw/rgw_asio_client.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <boost/asio/ip/tcp.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include "include/ceph_assert.h"
#include "rgw_client_io.h"
namespace rgw {
namespace asio {
namespace beast = boost::beast;
using parser_type = beast::http::request_parser<beast::http::buffer_body>;
class ClientIO : public io::RestfulClient,
public io::BuffererSink {
protected:
parser_type& parser;
private:
const bool is_ssl;
using endpoint_type = boost::asio::ip::tcp::endpoint;
endpoint_type local_endpoint;
endpoint_type remote_endpoint;
RGWEnv env;
rgw::io::StaticOutputBufferer<> txbuf;
bool sent100continue = false;
public:
ClientIO(parser_type& parser, bool is_ssl,
const endpoint_type& local_endpoint,
const endpoint_type& remote_endpoint);
~ClientIO() override;
int init_env(CephContext *cct) override;
size_t complete_request() override;
void flush() override;
size_t send_status(int status, const char *status_name) override;
size_t send_100_continue() override;
size_t send_header(const std::string_view& name,
const std::string_view& value) override;
size_t send_content_length(uint64_t len) override;
size_t complete_header() override;
size_t send_body(const char* buf, size_t len) override {
return write_data(buf, len);
}
RGWEnv& get_env() noexcept override {
return env;
}
bool sent_100_continue() const { return sent100continue; }
};
} // namespace asio
} // namespace rgw
| 1,640 | 25.047619 | 74 | h |
null | ceph-main/src/rgw/rgw_asio_frontend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <memory>
#include "rgw_frontend.h"
#define REQUEST_TIMEOUT 65000
class RGWAsioFrontend : public RGWFrontend {
class Impl;
std::unique_ptr<Impl> impl;
public:
RGWAsioFrontend(RGWProcessEnv& env, RGWFrontendConfig* conf,
rgw::dmclock::SchedulerCtx& sched_ctx);
~RGWAsioFrontend() override;
int init() override;
int run() override;
void stop() override;
void join() override;
void pause_for_new_config() override;
void unpause_with_new_config() override;
};
| 611 | 22.538462 | 70 | h |
null | ceph-main/src/rgw/rgw_asio_frontend_timer.h | #pragma once
#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/intrusive_ptr.hpp>
#include "common/ceph_time.h"
namespace rgw {
// a WaitHandler that closes a stream if the timeout expires
template <typename Stream>
struct timeout_handler {
// this handler may outlive the timer/stream, so we need to hold a reference
// to keep the stream alive
boost::intrusive_ptr<Stream> stream;
explicit timeout_handler(boost::intrusive_ptr<Stream> stream) noexcept
: stream(std::move(stream)) {}
void operator()(boost::system::error_code ec) {
if (!ec) { // wait was not canceled
boost::system::error_code ec_ignored;
stream->get_socket().cancel();
stream->get_socket().shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec_ignored);
}
}
};
// a timeout timer for stream operations
template <typename Clock, typename Executor, typename Stream>
class basic_timeout_timer {
public:
using clock_type = Clock;
using duration = typename clock_type::duration;
using executor_type = Executor;
explicit basic_timeout_timer(const executor_type& ex, duration dur,
boost::intrusive_ptr<Stream> stream)
: timer(ex), dur(dur), stream(std::move(stream))
{}
basic_timeout_timer(const basic_timeout_timer&) = delete;
basic_timeout_timer& operator=(const basic_timeout_timer&) = delete;
void start() {
if (dur.count() > 0) {
timer.expires_after(dur);
timer.async_wait(timeout_handler{stream});
}
}
void cancel() {
if (dur.count() > 0) {
timer.cancel();
}
}
private:
using Timer = boost::asio::basic_waitable_timer<clock_type,
boost::asio::wait_traits<clock_type>, executor_type>;
Timer timer;
duration dur;
boost::intrusive_ptr<Stream> stream;
};
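// Illustrative sketch (not part of the original header): how a frontend can
// wrap a stream operation with this timer. "Connection" stands in for the
// real connection type, which must be intrusively ref-counted and expose
// get_socket(); the clock and executor choices below are assumptions.
//
//   using timeout_timer = rgw::basic_timeout_timer<ceph::coarse_mono_clock,
//       boost::asio::any_io_executor, Connection>;
//
//   timeout_timer timeout(executor, std::chrono::milliseconds(65000), conn);
//   timeout.start();     // arm: shut the socket down if the op stalls
//   // ... perform the read/write on conn->get_socket() ...
//   timeout.cancel();    // disarm once the operation completes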
} // namespace rgw
| 1,822 | 26.208955 | 93 | h |
null | ceph-main/src/rgw/rgw_auth_filters.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <type_traits>
#include <boost/logic/tribool.hpp>
#include <boost/optional.hpp>
#include "rgw_service.h"
#include "rgw_common.h"
#include "rgw_auth.h"
#include "rgw_user.h"
namespace rgw {
namespace auth {
/* Abstract decorator over any implementation of rgw::auth::IdentityApplier
* which could be provided both as a pointer-to-object or the object itself. */
template <typename DecorateeT>
class DecoratedApplier : public rgw::auth::IdentityApplier {
typedef typename std::remove_pointer<DecorateeT>::type DerefedDecorateeT;
static_assert(std::is_base_of<rgw::auth::IdentityApplier,
DerefedDecorateeT>::value,
"DecorateeT must be a subclass of rgw::auth::IdentityApplier");
DecorateeT decoratee;
/* There is an indirection layer over accessing decoratee to share the same
* code base between dynamic and static decorators. The difference is about
* what we store internally: pointer to a decorated object versus the whole
* object itself. Googling for "SFINAE" can help to understand the code. */
template <typename T = void,
typename std::enable_if<
std::is_pointer<DecorateeT>::value, T>::type* = nullptr>
DerefedDecorateeT& get_decoratee() {
return *decoratee;
}
template <typename T = void,
typename std::enable_if<
! std::is_pointer<DecorateeT>::value, T>::type* = nullptr>
DerefedDecorateeT& get_decoratee() {
return decoratee;
}
template <typename T = void,
typename std::enable_if<
std::is_pointer<DecorateeT>::value, T>::type* = nullptr>
const DerefedDecorateeT& get_decoratee() const {
return *decoratee;
}
template <typename T = void,
typename std::enable_if<
! std::is_pointer<DecorateeT>::value, T>::type* = nullptr>
const DerefedDecorateeT& get_decoratee() const {
return decoratee;
}
public:
explicit DecoratedApplier(DecorateeT&& decoratee)
: decoratee(std::forward<DecorateeT>(decoratee)) {
}
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
return get_decoratee().get_perms_from_aclspec(dpp, aclspec);
}
bool is_admin_of(const rgw_user& uid) const override {
return get_decoratee().is_admin_of(uid);
}
bool is_owner_of(const rgw_user& uid) const override {
return get_decoratee().is_owner_of(uid);
}
bool is_anonymous() const override {
return get_decoratee().is_anonymous();
}
uint32_t get_perm_mask() const override {
return get_decoratee().get_perm_mask();
}
uint32_t get_identity_type() const override {
return get_decoratee().get_identity_type();
}
std::string get_acct_name() const override {
return get_decoratee().get_acct_name();
}
std::string get_subuser() const override {
return get_decoratee().get_subuser();
}
bool is_identity(
const boost::container::flat_set<Principal>& ids) const override {
return get_decoratee().is_identity(ids);
}
void to_str(std::ostream& out) const override {
get_decoratee().to_str(out);
}
std::string get_role_tenant() const override { /* in/out */
return get_decoratee().get_role_tenant();
}
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override { /* out */
return get_decoratee().load_acct_info(dpp, user_info);
}
void modify_request_state(const DoutPrefixProvider* dpp, req_state * s) const override { /* in/out */
return get_decoratee().modify_request_state(dpp, s);
}
void write_ops_log_entry(rgw_log_entry& entry) const override {
return get_decoratee().write_ops_log_entry(entry);
}
};
template <typename T>
class ThirdPartyAccountApplier : public DecoratedApplier<T> {
rgw::sal::Driver* driver;
const rgw_user acct_user_override;
public:
/* A value representing situations where there is no requested account
* override. In other words, acct_user_override will be equal to this
* constant where the request isn't a cross-tenant one. */
static const rgw_user UNKNOWN_ACCT;
template <typename U>
ThirdPartyAccountApplier(rgw::sal::Driver* driver,
const rgw_user &acct_user_override,
U&& decoratee)
: DecoratedApplier<T>(std::move(decoratee)),
driver(driver),
acct_user_override(acct_user_override) {
}
void to_str(std::ostream& out) const override;
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
};
/* static declaration: UNKNOWN_ACCT will be an empty rgw_user that is a result
* of the default construction. */
template <typename T>
const rgw_user ThirdPartyAccountApplier<T>::UNKNOWN_ACCT;
template <typename T>
void ThirdPartyAccountApplier<T>::to_str(std::ostream& out) const
{
out << "rgw::auth::ThirdPartyAccountApplier(" + acct_user_override.to_str() + ")"
<< " -> ";
DecoratedApplier<T>::to_str(out);
}
template <typename T>
void ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const
{
if (UNKNOWN_ACCT == acct_user_override) {
/* There is no override specified by the upper layer. This means that we'll
* load the account owned by the authenticated identity (aka auth_user). */
DecoratedApplier<T>::load_acct_info(dpp, user_info);
} else if (DecoratedApplier<T>::is_owner_of(acct_user_override)) {
/* The override has been specified but the account belongs to the authenticated
* identity. We may safely forward the call to a next stage. */
DecoratedApplier<T>::load_acct_info(dpp, user_info);
} else if (this->is_anonymous()) {
/* If the user was authed by the anonymous engine then scope the ANON user
* to the correct tenant */
if (acct_user_override.tenant.empty())
user_info.user_id = rgw_user(acct_user_override.id, RGW_USER_ANON_ID);
else
user_info.user_id = rgw_user(acct_user_override.tenant, RGW_USER_ANON_ID);
} else {
/* Compatibility mechanism for multi-tenancy. For more details refer to
* load_acct_info method of rgw::auth::RemoteApplier. */
std::unique_ptr<rgw::sal::User> user;
if (acct_user_override.tenant.empty()) {
const rgw_user tenanted_uid(acct_user_override.id, acct_user_override.id);
user = driver->get_user(tenanted_uid);
if (user->load_user(dpp, null_yield) >= 0) {
user_info = user->get_info();
/* Succeeded. */
return;
}
}
user = driver->get_user(acct_user_override);
const int ret = user->load_user(dpp, null_yield);
if (ret < 0) {
/* We aren't trying to recover from ENOENT here. The assumption is that
 * creating someone else's account isn't something we want to support
 * in this filter. */
if (ret == -ENOENT) {
throw -EACCES;
} else {
throw ret;
}
}
user_info = user->get_info();
}
}
template <typename T> static inline
ThirdPartyAccountApplier<T> add_3rdparty(rgw::sal::Driver* driver,
const rgw_user &acct_user_override,
T&& t) {
return ThirdPartyAccountApplier<T>(driver, acct_user_override,
std::forward<T>(t));
}
template <typename T>
class SysReqApplier : public DecoratedApplier<T> {
CephContext* const cct;
rgw::sal::Driver* driver;
const RGWHTTPArgs& args;
mutable boost::tribool is_system;
public:
template <typename U>
SysReqApplier(CephContext* const cct,
rgw::sal::Driver* driver,
const req_state* const s,
U&& decoratee)
: DecoratedApplier<T>(std::forward<T>(decoratee)),
cct(cct),
driver(driver),
args(s->info.args),
is_system(boost::logic::indeterminate) {
}
void to_str(std::ostream& out) const override;
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override; /* in/out */
};
template <typename T>
void SysReqApplier<T>::to_str(std::ostream& out) const
{
out << "rgw::auth::SysReqApplier" << " -> ";
DecoratedApplier<T>::to_str(out);
}
template <typename T>
void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const
{
DecoratedApplier<T>::load_acct_info(dpp, user_info);
is_system = user_info.system;
if (is_system) {
//ldpp_dout(dpp, 20) << "system request" << dendl;
rgw_user effective_uid(args.sys_get(RGW_SYS_PARAM_PREFIX "uid"));
if (! effective_uid.empty()) {
/* We aren't writing directly to user_info for consistency and security
* reasons. rgw_get_user_info_by_uid doesn't trigger the operator=() but
* calls ::decode instead. */
std::unique_ptr<rgw::sal::User> user = driver->get_user(effective_uid);
if (user->load_user(dpp, null_yield) < 0) {
//ldpp_dout(dpp, 0) << "User lookup failed!" << dendl;
throw -EACCES;
}
user_info = user->get_info();
}
}
}
template <typename T>
void SysReqApplier<T>::modify_request_state(const DoutPrefixProvider* dpp, req_state* const s) const
{
if (boost::logic::indeterminate(is_system)) {
RGWUserInfo unused_info;
load_acct_info(dpp, unused_info);
}
if (is_system) {
s->info.args.set_system();
s->system_request = true;
}
DecoratedApplier<T>::modify_request_state(dpp, s);
}
template <typename T> static inline
SysReqApplier<T> add_sysreq(CephContext* const cct,
rgw::sal::Driver* driver,
const req_state* const s,
T&& t) {
return SysReqApplier<T>(cct, driver, s, std::forward<T>(t));
}
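/* Illustrative sketch (not part of the original header): the two helpers are
 * meant to be stacked, innermost decoratee first. "decoratee" stands for any
 * concrete IdentityApplier (e.g. rgw::auth::LocalApplier from rgw_auth.h);
 * the composed object still behaves like a single applier. */
template <typename Decoratee>
auto make_filtered_applier(CephContext* const cct,
                           rgw::sal::Driver* driver,
                           const req_state* const s,
                           const rgw_user& acct_override,
                           Decoratee&& decoratee) {
  return add_sysreq(cct, driver, s,
           add_3rdparty(driver, acct_override,
             std::forward<Decoratee>(decoratee)));
}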
} /* namespace auth */
} /* namespace rgw */
| 9,946 | 31.828383 | 109 | h |
null | ceph-main/src/rgw/rgw_auth_keystone.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string_view>
#include <utility>
#include <boost/optional.hpp>
#include "rgw_auth.h"
#include "rgw_rest_s3.h"
#include "rgw_common.h"
#include "rgw_keystone.h"
namespace rgw {
namespace auth {
namespace keystone {
/* Dedicated namespace for Keystone-related auth engines. We need it because
* Keystone offers three different authentication mechanisms (token, EC2 and
* regular user/pass). RadosGW actually does support the first two. */
class TokenEngine : public rgw::auth::Engine {
CephContext* const cct;
using acl_strategy_t = rgw::auth::RemoteApplier::acl_strategy_t;
using auth_info_t = rgw::auth::RemoteApplier::AuthInfo;
using result_t = rgw::auth::Engine::result_t;
using token_envelope_t = rgw::keystone::TokenEnvelope;
const rgw::auth::TokenExtractor* const auth_token_extractor;
const rgw::auth::TokenExtractor* const service_token_extractor;
const rgw::auth::RemoteApplier::Factory* const apl_factory;
rgw::keystone::Config& config;
rgw::keystone::TokenCache& token_cache;
/* Helper methods. */
bool is_applicable(const std::string& token) const noexcept;
boost::optional<token_envelope_t>
get_from_keystone(const DoutPrefixProvider* dpp, const std::string& token, bool allow_expired) const;
acl_strategy_t get_acl_strategy(const token_envelope_t& token) const;
auth_info_t get_creds_info(const token_envelope_t& token) const noexcept;
result_t authenticate(const DoutPrefixProvider* dpp,
const std::string& token,
const std::string& service_token,
const req_state* s) const;
public:
TokenEngine(CephContext* const cct,
const rgw::auth::TokenExtractor* const auth_token_extractor,
const rgw::auth::TokenExtractor* const service_token_extractor,
const rgw::auth::RemoteApplier::Factory* const apl_factory,
rgw::keystone::Config& config,
rgw::keystone::TokenCache& token_cache)
: cct(cct),
auth_token_extractor(auth_token_extractor),
service_token_extractor(service_token_extractor),
apl_factory(apl_factory),
config(config),
token_cache(token_cache) {
}
const char* get_name() const noexcept override {
return "rgw::auth::keystone::TokenEngine";
}
result_t authenticate(const DoutPrefixProvider* dpp, const req_state* const s,
optional_yield y) const override {
return authenticate(dpp, auth_token_extractor->get_token(s), service_token_extractor->get_token(s), s);
}
}; /* class TokenEngine */
class SecretCache {
using token_envelope_t = rgw::keystone::TokenEnvelope;
struct secret_entry {
token_envelope_t token;
std::string secret;
utime_t expires;
std::list<std::string>::iterator lru_iter;
};
const boost::intrusive_ptr<CephContext> cct;
std::map<std::string, secret_entry> secrets;
std::list<std::string> secrets_lru;
std::mutex lock;
const size_t max;
const utime_t s3_token_expiry_length;
SecretCache()
: cct(g_ceph_context),
lock(),
max(cct->_conf->rgw_keystone_token_cache_size),
s3_token_expiry_length(300, 0) {
}
~SecretCache() {}
public:
SecretCache(const SecretCache&) = delete;
void operator=(const SecretCache&) = delete;
static SecretCache& get_instance() {
/* In C++11 this is thread safe. */
static SecretCache instance;
return instance;
}
bool find(const std::string& token_id, token_envelope_t& token, std::string& secret);
boost::optional<boost::tuple<token_envelope_t, std::string>> find(const std::string& token_id) {
token_envelope_t token_envlp;
std::string secret;
if (find(token_id, token_envlp, secret)) {
return boost::make_tuple(token_envlp, secret);
}
return boost::none;
}
void add(const std::string& token_id, const token_envelope_t& token, const std::string& secret);
}; /* class SecretCache */
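/* Illustrative sketch (not part of the original header): how the EC2 engine
 * can consult the process-wide cache before asking Keystone again. The token
 * id is hypothetical. */
inline bool secret_is_cached(const std::string& token_id) {
  rgw::keystone::TokenEnvelope token;
  std::string secret;
  return SecretCache::get_instance().find(token_id, token, secret);
}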
class EC2Engine : public rgw::auth::s3::AWSEngine {
using acl_strategy_t = rgw::auth::RemoteApplier::acl_strategy_t;
using auth_info_t = rgw::auth::RemoteApplier::AuthInfo;
using result_t = rgw::auth::Engine::result_t;
using token_envelope_t = rgw::keystone::TokenEnvelope;
const rgw::auth::RemoteApplier::Factory* const apl_factory;
rgw::keystone::Config& config;
rgw::keystone::TokenCache& token_cache;
rgw::auth::keystone::SecretCache& secret_cache;
/* Helper methods. */
acl_strategy_t get_acl_strategy(const token_envelope_t& token) const;
auth_info_t get_creds_info(const token_envelope_t& token,
const std::vector<std::string>& admin_roles,
const std::string& access_key_id
) const noexcept;
std::pair<boost::optional<token_envelope_t>, int>
get_from_keystone(const DoutPrefixProvider* dpp,
const std::string_view& access_key_id,
const std::string& string_to_sign,
const std::string_view& signature) const;
struct access_token_result {
boost::optional<token_envelope_t> token;
boost::optional<std::string> secret_key;
int failure_reason = 0;
};
access_token_result
get_access_token(const DoutPrefixProvider* dpp,
const std::string_view& access_key_id,
const std::string& string_to_sign,
const std::string_view& signature,
const signature_factory_t& signature_factory) const;
result_t authenticate(const DoutPrefixProvider* dpp,
const std::string_view& access_key_id,
const std::string_view& signature,
const std::string_view& session_token,
const string_to_sign_t& string_to_sign,
const signature_factory_t& signature_factory,
const completer_factory_t& completer_factory,
const req_state* s,
optional_yield y) const override;
std::pair<boost::optional<std::string>, int> get_secret_from_keystone(const DoutPrefixProvider* dpp,
const std::string& user_id,
const std::string_view& access_key_id) const;
public:
EC2Engine(CephContext* const cct,
const rgw::auth::s3::AWSEngine::VersionAbstractor* const ver_abstractor,
const rgw::auth::RemoteApplier::Factory* const apl_factory,
rgw::keystone::Config& config,
            /* The token cache is used ONLY for retrieving the admin token.
* Due to the architecture of AWS Auth S3 credentials cannot be
* cached at all. */
rgw::keystone::TokenCache& token_cache,
rgw::auth::keystone::SecretCache& secret_cache)
: AWSEngine(cct, *ver_abstractor),
apl_factory(apl_factory),
config(config),
token_cache(token_cache),
secret_cache(secret_cache) {
}
using AWSEngine::authenticate;
const char* get_name() const noexcept override {
return "rgw::auth::keystone::EC2Engine";
}
}; /* class EC2Engine */
}; /* namespace keystone */
}; /* namespace auth */
}; /* namespace rgw */
| 7,368 | 35.661692 | 117 | h |
null | ceph-main/src/rgw/rgw_auth_registry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <functional>
#include <memory>
#include <ostream>
#include <type_traits>
#include <utility>
#include "rgw_auth.h"
#include "rgw_auth_s3.h"
#include "rgw_swift_auth.h"
#include "rgw_rest_sts.h"
namespace rgw {
namespace auth {
/* A class aggregating the knowledge about all Strategies in RadosGW. It is
 * responsible for handling dynamic reconfiguration, e.g. on realm update. */
class StrategyRegistry {
template <class AbstractorT,
bool AllowAnonAccessT = false>
using s3_strategy_t = \
rgw::auth::s3::AWSAuthStrategy<AbstractorT, AllowAnonAccessT>;
struct s3_main_strategy_t : public Strategy {
using s3_main_strategy_plain_t = \
s3_strategy_t<rgw::auth::s3::AWSGeneralAbstractor, true>;
using s3_main_strategy_boto2_t = \
s3_strategy_t<rgw::auth::s3::AWSGeneralBoto2Abstractor>;
s3_main_strategy_plain_t s3_main_strategy_plain;
s3_main_strategy_boto2_t s3_main_strategy_boto2;
s3_main_strategy_t(CephContext* const cct,
const ImplicitTenants& implicit_tenant_context,
rgw::sal::Driver* driver)
: s3_main_strategy_plain(cct, implicit_tenant_context, driver),
s3_main_strategy_boto2(cct, implicit_tenant_context, driver) {
add_engine(Strategy::Control::SUFFICIENT, s3_main_strategy_plain);
add_engine(Strategy::Control::FALLBACK, s3_main_strategy_boto2);
}
const char* get_name() const noexcept override {
return "rgw::auth::StrategyRegistry::s3_main_strategy_t";
}
} s3_main_strategy;
using s3_post_strategy_t = \
s3_strategy_t<rgw::auth::s3::AWSBrowserUploadAbstractor>;
s3_post_strategy_t s3_post_strategy;
rgw::auth::swift::DefaultStrategy swift_strategy;
rgw::auth::sts::DefaultStrategy sts_strategy;
public:
StrategyRegistry(CephContext* const cct,
const ImplicitTenants& implicit_tenant_context,
rgw::sal::Driver* driver)
: s3_main_strategy(cct, implicit_tenant_context, driver),
s3_post_strategy(cct, implicit_tenant_context, driver),
swift_strategy(cct, implicit_tenant_context, driver),
sts_strategy(cct, implicit_tenant_context, driver) {
}
const s3_main_strategy_t& get_s3_main() const {
return s3_main_strategy;
}
const s3_post_strategy_t& get_s3_post() const {
return s3_post_strategy;
}
const rgw::auth::swift::DefaultStrategy& get_swift() const {
return swift_strategy;
}
const rgw::auth::sts::DefaultStrategy& get_sts() const {
return sts_strategy;
}
static std::unique_ptr<StrategyRegistry>
create(CephContext* const cct,
const ImplicitTenants& implicit_tenant_context,
rgw::sal::Driver* driver) {
return std::make_unique<StrategyRegistry>(cct, implicit_tenant_context, driver);
}
};
} /* namespace auth */
} /* namespace rgw */
using rgw_auth_registry_t = rgw::auth::StrategyRegistry;
using rgw_auth_registry_ptr_t = std::unique_ptr<rgw_auth_registry_t>;
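/* Usage sketch (illustrative only, not part of the upstream header), assuming
 * hypothetical `cct`, `implicit_tenants` and `driver` objects are already at
 * hand:
 *
 *   rgw_auth_registry_ptr_t registry =
 *       rgw_auth_registry_t::create(cct, implicit_tenants, driver);
 *   // pick the strategy matching the frontend that handles the request
 *   const auto& s3_auth = registry->get_s3_main();
 *   const auto& swift_auth = registry->get_swift();
 */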
| 3,080 | 30.438776 | 84 | h |
null | ceph-main/src/rgw/rgw_auth_s3.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <array>
#include <memory>
#include <string>
#include <string_view>
#include <tuple>
#include <boost/algorithm/string.hpp>
#include <boost/container/static_vector.hpp>
#include "common/sstring.hh"
#include "rgw_common.h"
#include "rgw_rest_s3.h"
#include "rgw_auth.h"
#include "rgw_auth_filters.h"
#include "rgw_auth_keystone.h"
namespace rgw {
namespace auth {
namespace s3 {
static constexpr auto RGW_AUTH_GRACE = std::chrono::minutes{15};
// returns true if the request time is within RGW_AUTH_GRACE of the current time
bool is_time_skew_ok(time_t t);
class STSAuthStrategy : public rgw::auth::Strategy,
public rgw::auth::RemoteApplier::Factory,
public rgw::auth::LocalApplier::Factory,
public rgw::auth::RoleApplier::Factory {
typedef rgw::auth::IdentityApplier::aplptr_t aplptr_t;
rgw::sal::Driver* driver;
const rgw::auth::ImplicitTenants& implicit_tenant_context;
STSEngine sts_engine;
aplptr_t create_apl_remote(CephContext* const cct,
const req_state* const s,
rgw::auth::RemoteApplier::acl_strategy_t&& acl_alg,
const rgw::auth::RemoteApplier::AuthInfo &info) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg), info,
implicit_tenant_context,
rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_S3));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
const RGWUserInfo& user_info,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
aplptr_t create_apl_role(CephContext* const cct,
const req_state* const s,
const rgw::auth::RoleApplier::Role& role,
const rgw::auth::RoleApplier::TokenAttrs& token_attrs) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::RoleApplier(cct, role, token_attrs));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
public:
STSAuthStrategy(CephContext* const cct,
rgw::sal::Driver* driver,
const rgw::auth::ImplicitTenants& implicit_tenant_context,
AWSEngine::VersionAbstractor* const ver_abstractor)
: driver(driver),
implicit_tenant_context(implicit_tenant_context),
sts_engine(cct, driver, *ver_abstractor,
static_cast<rgw::auth::LocalApplier::Factory*>(this),
static_cast<rgw::auth::RemoteApplier::Factory*>(this),
static_cast<rgw::auth::RoleApplier::Factory*>(this)) {
if (cct->_conf->rgw_s3_auth_use_sts) {
add_engine(Control::SUFFICIENT, sts_engine);
}
}
const char* get_name() const noexcept override {
return "rgw::auth::s3::STSAuthStrategy";
}
};
class ExternalAuthStrategy : public rgw::auth::Strategy,
public rgw::auth::RemoteApplier::Factory {
typedef rgw::auth::IdentityApplier::aplptr_t aplptr_t;
rgw::sal::Driver* driver;
const rgw::auth::ImplicitTenants& implicit_tenant_context;
using keystone_config_t = rgw::keystone::CephCtxConfig;
using keystone_cache_t = rgw::keystone::TokenCache;
using secret_cache_t = rgw::auth::keystone::SecretCache;
using EC2Engine = rgw::auth::keystone::EC2Engine;
boost::optional <EC2Engine> keystone_engine;
LDAPEngine ldap_engine;
aplptr_t create_apl_remote(CephContext* const cct,
const req_state* const s,
rgw::auth::RemoteApplier::acl_strategy_t&& acl_alg,
const rgw::auth::RemoteApplier::AuthInfo &info) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg), info,
implicit_tenant_context,
rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_S3));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
}
public:
ExternalAuthStrategy(CephContext* const cct,
rgw::sal::Driver* driver,
const rgw::auth::ImplicitTenants& implicit_tenant_context,
AWSEngine::VersionAbstractor* const ver_abstractor)
: driver(driver),
implicit_tenant_context(implicit_tenant_context),
ldap_engine(cct, driver, *ver_abstractor,
static_cast<rgw::auth::RemoteApplier::Factory*>(this)) {
if (cct->_conf->rgw_s3_auth_use_keystone &&
! cct->_conf->rgw_keystone_url.empty()) {
keystone_engine.emplace(cct, ver_abstractor,
static_cast<rgw::auth::RemoteApplier::Factory*>(this),
keystone_config_t::get_instance(),
keystone_cache_t::get_instance<keystone_config_t>(),
secret_cache_t::get_instance());
add_engine(Control::SUFFICIENT, *keystone_engine);
}
if (ldap_engine.valid()) {
add_engine(Control::SUFFICIENT, ldap_engine);
}
}
const char* get_name() const noexcept override {
return "rgw::auth::s3::AWSv2ExternalAuthStrategy";
}
};
template <class AbstractorT,
bool AllowAnonAccessT = false>
class AWSAuthStrategy : public rgw::auth::Strategy,
public rgw::auth::LocalApplier::Factory {
typedef rgw::auth::IdentityApplier::aplptr_t aplptr_t;
static_assert(std::is_base_of<rgw::auth::s3::AWSEngine::VersionAbstractor,
AbstractorT>::value,
"AbstractorT must be a subclass of rgw::auth::s3::VersionAbstractor");
rgw::sal::Driver* driver;
AbstractorT ver_abstractor;
S3AnonymousEngine anonymous_engine;
ExternalAuthStrategy external_engines;
STSAuthStrategy sts_engine;
LocalEngine local_engine;
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
const RGWUserInfo& user_info,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
}
public:
using engine_map_t = std::map <std::string, std::reference_wrapper<const Engine>>;
void add_engines(const std::vector <std::string>& auth_order,
engine_map_t eng_map)
{
auto ctrl_flag = Control::SUFFICIENT;
for (const auto &eng : auth_order) {
      // fall back to the last engine when there are multiple engines: since
      // the ctrl flag is SUFFICIENT for the others, the error from an earlier
      // engine is returned
if (&eng == &auth_order.back() && eng_map.size() > 1) {
ctrl_flag = Control::FALLBACK;
}
if (const auto kv = eng_map.find(eng);
kv != eng_map.end()) {
add_engine(ctrl_flag, kv->second);
}
}
}
auto parse_auth_order(CephContext* const cct)
{
std::vector <std::string> result;
const std::set <std::string_view> allowed_auth = { "sts", "external", "local" };
std::vector <std::string> default_order = { "sts", "external", "local" };
// supplied strings may contain a space, so let's bypass that
boost::split(result, cct->_conf->rgw_s3_auth_order,
boost::is_any_of(", "), boost::token_compress_on);
if (std::any_of(result.begin(), result.end(),
[allowed_auth](std::string_view s)
{ return allowed_auth.find(s) == allowed_auth.end();})){
return default_order;
}
return result;
}
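  /* Worked example (illustrative, not upstream code), assuming both the
   * "local" and "external" engines ended up in eng_map:
   *
   *   // ceph.conf: rgw_s3_auth_order = "local, external"
   *   auto order = parse_auth_order(cct);   // {"local", "external"}
   *   add_engines(order, eng_map);
   *   //  -> add_engine(Control::SUFFICIENT, local_engine)
   *   //  -> add_engine(Control::FALLBACK,   external_engines)
   *
   * Any unrecognized entry makes parse_auth_order() return the default
   * order { "sts", "external", "local" } instead. */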
AWSAuthStrategy(CephContext* const cct,
const rgw::auth::ImplicitTenants& implicit_tenant_context,
rgw::sal::Driver* driver)
: driver(driver),
ver_abstractor(cct),
anonymous_engine(cct,
static_cast<rgw::auth::LocalApplier::Factory*>(this)),
external_engines(cct, driver, implicit_tenant_context, &ver_abstractor),
sts_engine(cct, driver, implicit_tenant_context, &ver_abstractor),
local_engine(cct, driver, ver_abstractor,
static_cast<rgw::auth::LocalApplier::Factory*>(this)) {
/* The anonymous auth. */
if (AllowAnonAccessT) {
add_engine(Control::SUFFICIENT, anonymous_engine);
}
auto auth_order = parse_auth_order(cct);
engine_map_t engine_map;
/* STS Auth*/
if (! sts_engine.is_empty()) {
engine_map.insert(std::make_pair("sts", std::cref(sts_engine)));
}
/* The external auth. */
if (! external_engines.is_empty()) {
engine_map.insert(std::make_pair("external", std::cref(external_engines)));
}
/* The local auth. */
if (cct->_conf->rgw_s3_auth_use_rados) {
engine_map.insert(std::make_pair("local", std::cref(local_engine)));
}
add_engines(auth_order, engine_map);
}
const char* get_name() const noexcept override {
return "rgw::auth::s3::AWSAuthStrategy";
}
};
class AWSv4ComplMulti : public rgw::auth::Completer,
public rgw::io::DecoratedRestfulClient<rgw::io::RestfulClient*>,
public std::enable_shared_from_this<AWSv4ComplMulti> {
using io_base_t = rgw::io::DecoratedRestfulClient<rgw::io::RestfulClient*>;
using signing_key_t = sha256_digest_t;
CephContext* const cct;
const std::string_view date;
const std::string_view credential_scope;
const signing_key_t signing_key;
class ChunkMeta {
size_t data_offset_in_stream = 0;
size_t data_length = 0;
std::string signature;
ChunkMeta(const size_t data_starts_in_stream,
const size_t data_length,
const std::string_view signature)
: data_offset_in_stream(data_starts_in_stream),
data_length(data_length),
signature(std::string(signature)) {
}
explicit ChunkMeta(const std::string_view& signature)
: signature(std::string(signature)) {
}
public:
static constexpr size_t SIG_SIZE = 64;
/* Let's suppose the data length fields can't exceed uint64_t. */
static constexpr size_t META_MAX_SIZE = \
sarrlen("\r\nffffffffffffffff;chunk-signature=") + SIG_SIZE + sarrlen("\r\n");
    /* The metadata size of the last, empty chunk. */
static constexpr size_t META_MIN_SIZE = \
sarrlen("0;chunk-signature=") + SIG_SIZE + sarrlen("\r\n");
/* Detect whether a given stream_pos fits in boundaries of a chunk. */
bool is_new_chunk_in_stream(size_t stream_pos) const;
/* Get the remaining data size. */
size_t get_data_size(size_t stream_pos) const;
const std::string& get_signature() const {
return signature;
}
    /* Factory: create an object representing the metadata of the first,
     * initial chunk in a stream. */
static ChunkMeta create_first(const std::string_view& seed_signature) {
return ChunkMeta(seed_signature);
}
    /* Factory: parse a block of META_MAX_SIZE bytes and create an object
     * representing a non-first chunk in a stream. As the process is sequential
     * and depends on the previous chunk, the caller must pass it. */
static std::pair<ChunkMeta, size_t> create_next(CephContext* cct,
ChunkMeta&& prev,
const char* metabuf,
size_t metabuf_len);
} chunk_meta;
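  /* For orientation (an assumption based on the sarrlen() constants above,
   * matching the AWS SigV4 streaming-upload framing): a non-first chunk is
   * expected to look like
   *
   *   \r\n<hex size>;chunk-signature=<64 hex chars>\r\n<payload bytes>
   *
   * while the final, empty chunk carries a zero length:
   *
   *   0;chunk-signature=<64 hex chars>\r\n
   */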
size_t stream_pos;
boost::container::static_vector<char, ChunkMeta::META_MAX_SIZE> parsing_buf;
ceph::crypto::SHA256* sha256_hash;
std::string prev_chunk_signature;
bool is_signature_mismatched();
std::string calc_chunk_signature(const std::string& payload_hash) const;
public:
/* We need the constructor to be public because of the std::make_shared that
* is employed by the create() method. */
AWSv4ComplMulti(const req_state* const s,
std::string_view date,
std::string_view credential_scope,
std::string_view seed_signature,
const signing_key_t& signing_key)
: io_base_t(nullptr),
cct(s->cct),
date(std::move(date)),
credential_scope(std::move(credential_scope)),
signing_key(signing_key),
/* The evolving state. */
chunk_meta(ChunkMeta::create_first(seed_signature)),
stream_pos(0),
sha256_hash(calc_hash_sha256_open_stream()),
prev_chunk_signature(std::move(seed_signature)) {
}
~AWSv4ComplMulti() {
if (sha256_hash) {
calc_hash_sha256_close_stream(&sha256_hash);
}
}
/* rgw::io::DecoratedRestfulClient. */
size_t recv_body(char* buf, size_t max) override;
/* rgw::auth::Completer. */
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s_rw) override;
bool complete() override;
/* Factories. */
static cmplptr_t create(const req_state* s,
std::string_view date,
std::string_view credential_scope,
std::string_view seed_signature,
const boost::optional<std::string>& secret_key);
};
class AWSv4ComplSingle : public rgw::auth::Completer,
public rgw::io::DecoratedRestfulClient<rgw::io::RestfulClient*>,
public std::enable_shared_from_this<AWSv4ComplSingle> {
using io_base_t = rgw::io::DecoratedRestfulClient<rgw::io::RestfulClient*>;
CephContext* const cct;
const char* const expected_request_payload_hash;
ceph::crypto::SHA256* sha256_hash = nullptr;
public:
/* Defined in rgw_auth_s3.cc because of get_v4_exp_payload_hash(). We need
* the constructor to be public because of the std::make_shared employed by
* the create() method. */
explicit AWSv4ComplSingle(const req_state* const s);
~AWSv4ComplSingle() {
if (sha256_hash) {
calc_hash_sha256_close_stream(&sha256_hash);
}
}
/* rgw::io::DecoratedRestfulClient. */
size_t recv_body(char* buf, size_t max) override;
/* rgw::auth::Completer. */
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s_rw) override;
bool complete() override;
/* Factories. */
static cmplptr_t create(const req_state* s,
const boost::optional<std::string>&);
};
} /* namespace s3 */
} /* namespace auth */
} /* namespace rgw */
void rgw_create_s3_canonical_header(
const DoutPrefixProvider *dpp,
const char *method,
const char *content_md5,
const char *content_type,
const char *date,
const meta_map_t& meta_map,
const meta_map_t& qs_map,
const char *request_uri,
const std::map<std::string, std::string>& sub_resources,
std::string& dest_str);
bool rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp,
const req_info& info,
utime_t *header_time, /* out */
std::string& dest, /* out */
bool qsr);
static inline std::tuple<bool, std::string, utime_t>
rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, const req_info& info, const bool qsr) {
std::string dest;
utime_t header_time;
const bool ok = rgw_create_s3_canonical_header(dpp, info, &header_time, dest, qsr);
return std::make_tuple(ok, dest, header_time);
}
namespace rgw {
namespace auth {
namespace s3 {
static constexpr char AWS4_HMAC_SHA256_STR[] = "AWS4-HMAC-SHA256";
static constexpr char AWS4_HMAC_SHA256_PAYLOAD_STR[] = "AWS4-HMAC-SHA256-PAYLOAD";
static constexpr char AWS4_EMPTY_PAYLOAD_HASH[] = \
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
static constexpr char AWS4_UNSIGNED_PAYLOAD_HASH[] = "UNSIGNED-PAYLOAD";
static constexpr char AWS4_STREAMING_PAYLOAD_HASH[] = \
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
bool is_non_s3_op(RGWOpType op_type);
int parse_v4_credentials(const req_info& info, /* in */
std::string_view& access_key_id, /* out */
std::string_view& credential_scope, /* out */
std::string_view& signedheaders, /* out */
std::string_view& signature, /* out */
std::string_view& date, /* out */
std::string_view& session_token, /* out */
const bool using_qs, /* in */
const DoutPrefixProvider *dpp); /* in */
std::string gen_v4_scope(const ceph::real_time& timestamp,
                         const std::string& region,
                         const std::string& service);
static inline bool char_needs_aws4_escaping(const char c, bool encode_slash)
{
if ((c >= 'a' && c <= 'z') ||
(c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9')) {
return false;
}
switch (c) {
case '-':
case '_':
case '.':
case '~':
return false;
}
if (c == '/' && !encode_slash)
return false;
return true;
}
static inline std::string aws4_uri_encode(const std::string& src, bool encode_slash)
{
std::string result;
for (const std::string::value_type c : src) {
if (char_needs_aws4_escaping(c, encode_slash)) {
rgw_uri_escape_char(c, result);
} else {
result.push_back(c);
}
}
return result;
}
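/* Illustrative examples (assumptions, not upstream output guarantees; the
 * exact escape casing comes from rgw_uri_escape_char()):
 *
 *   aws4_uri_encode("a b/c", false);  // -> "a%20b/c"   ('/' left as-is)
 *   aws4_uri_encode("a b/c", true);   // -> "a%20b%2Fc" ('/' escaped too)
 *
 * Unreserved characters (alphanumerics and '-', '_', '.', '~') are never
 * escaped, per char_needs_aws4_escaping(). */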
static inline std::string aws4_uri_recode(const std::string_view& src, bool encode_slash)
{
std::string decoded = url_decode(src);
return aws4_uri_encode(decoded, encode_slash);
}
static inline std::string get_v4_canonical_uri(const req_info& info) {
  /* The code should normalize according to RFC 3986 but S3 does NOT do the
   * path normalization that SigV4 typically does. This code follows the same
   * approach as the boto library. See auth.py:canonical_uri(...). */
std::string canonical_uri = aws4_uri_recode(info.request_uri_aws4, false);
if (canonical_uri.empty()) {
canonical_uri = "/";
} else {
boost::replace_all(canonical_uri, "+", "%20");
}
return canonical_uri;
}
static inline std::string gen_v4_canonical_uri(const req_info& info) {
  /* The code should normalize according to RFC 3986 but S3 does NOT do the
   * path normalization that SigV4 typically does. This code follows the same
   * approach as the boto library. See auth.py:canonical_uri(...). */
std::string canonical_uri = aws4_uri_recode(info.request_uri, false);
if (canonical_uri.empty()) {
canonical_uri = "/";
} else {
boost::replace_all(canonical_uri, "+", "%20");
}
return canonical_uri;
}
static inline const std::string calc_v4_payload_hash(const std::string& payload)
{
ceph::crypto::SHA256* sha256_hash = calc_hash_sha256_open_stream();
calc_hash_sha256_update_stream(sha256_hash, payload.c_str(), payload.length());
const auto payload_hash = calc_hash_sha256_close_stream(&sha256_hash);
return payload_hash;
}
static inline const char* get_v4_exp_payload_hash(const req_info& info)
{
  /* In AWSv4 the hash of the real, transferred payload IS NOT necessary to
   * form a Canonical Request, and thus verify a Signature. The
   * x-amz-content-sha256 header lets us get the information very early --
   * before seeing the first byte of the HTTP body. As a consequence, we can
   * decouple Signature verification from the payload's fingerprint check. */
const char *expected_request_payload_hash = \
info.env->get("HTTP_X_AMZ_CONTENT_SHA256");
if (!expected_request_payload_hash) {
/* An HTTP client MUST send x-amz-content-sha256. The single exception
* is the case of using the Query Parameters where "UNSIGNED-PAYLOAD"
* literals are used for crafting Canonical Request:
*
* You don't include a payload hash in the Canonical Request, because
* when you create a presigned URL, you don't know the payload content
* because the URL is used to upload an arbitrary payload. Instead, you
* use a constant string UNSIGNED-PAYLOAD. */
expected_request_payload_hash = AWS4_UNSIGNED_PAYLOAD_HASH;
}
return expected_request_payload_hash;
}
static inline bool is_v4_payload_unsigned(const char* const exp_payload_hash)
{
return boost::equals(exp_payload_hash, AWS4_UNSIGNED_PAYLOAD_HASH);
}
static inline bool is_v4_payload_empty(const req_state* const s)
{
/* from rfc2616 - 4.3 Message Body
*
* "The presence of a message-body in a request is signaled by the inclusion
* of a Content-Length or Transfer-Encoding header field in the request's
* message-headers." */
return s->content_length == 0 &&
s->info.env->get("HTTP_TRANSFER_ENCODING") == nullptr;
}
static inline bool is_v4_payload_streamed(const char* const exp_payload_hash)
{
return boost::equals(exp_payload_hash, AWS4_STREAMING_PAYLOAD_HASH);
}
std::string get_v4_canonical_qs(const req_info& info, bool using_qs);
std::string gen_v4_canonical_qs(const req_info& info, bool is_non_s3_op);
boost::optional<std::string>
get_v4_canonical_headers(const req_info& info,
const std::string_view& signedheaders,
bool using_qs,
bool force_boto2_compat);
std::string gen_v4_canonical_headers(const req_info& info,
const std::map<std::string, std::string>& extra_headers,
                                     std::string *signed_hdrs);
extern sha256_digest_t
get_v4_canon_req_hash(CephContext* cct,
const std::string_view& http_verb,
const std::string& canonical_uri,
const std::string& canonical_qs,
const std::string& canonical_hdrs,
const std::string_view& signed_hdrs,
const std::string_view& request_payload_hash,
const DoutPrefixProvider *dpp);
AWSEngine::VersionAbstractor::string_to_sign_t
get_v4_string_to_sign(CephContext* cct,
const std::string_view& algorithm,
const std::string_view& request_date,
const std::string_view& credential_scope,
const sha256_digest_t& canonreq_hash,
const DoutPrefixProvider *dpp);
extern AWSEngine::VersionAbstractor::server_signature_t
get_v4_signature(const std::string_view& credential_scope,
CephContext* const cct,
const std::string_view& secret_key,
const AWSEngine::VersionAbstractor::string_to_sign_t& string_to_sign,
const DoutPrefixProvider *dpp);
extern AWSEngine::VersionAbstractor::server_signature_t
get_v2_signature(CephContext*,
const std::string& secret_key,
const AWSEngine::VersionAbstractor::string_to_sign_t& string_to_sign);
} /* namespace s3 */
} /* namespace auth */
} /* namespace rgw */
| 23,640 | 35.539413 | 101 | h |
null | ceph-main/src/rgw/rgw_b64.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <boost/archive/iterators/base64_from_binary.hpp>
#include <boost/archive/iterators/binary_from_base64.hpp>
#include <boost/archive/iterators/insert_linebreaks.hpp>
#include <boost/archive/iterators/transform_width.hpp>
#include <boost/archive/iterators/remove_whitespace.hpp>
#include <limits>
#include <string>
#include <string_view>
namespace rgw {
/*
* A header-only Base64 encoder built on boost::archive. The
   * formula is based on a class proposed for inclusion in boost in
* 2011 by Denis Shevchenko (abandoned), updated slightly
* (e.g., uses std::string_view).
*
* Also, wrap_width added as template argument, based on
* feedback from Marcus.
*/
template<int wrap_width = std::numeric_limits<int>::max()>
inline std::string to_base64(std::string_view sview)
{
using namespace boost::archive::iterators;
    // round the input size up to a multiple of 3; each extra byte
    // corresponds to one '=' padding character in the output
auto psize = sview.size();
while ((psize % 3) != 0) {
++psize;
}
    /* RFC 2045 requires linebreaks in the output sequence at least
     * every 76 characters (MIME compliance), but we could likely
     * omit them. */
typedef
insert_linebreaks<
base64_from_binary<
transform_width<
std::string_view::const_iterator
,6,8>
>
,wrap_width
> b64_iter;
std::string outstr(b64_iter(sview.data()),
b64_iter(sview.data() + sview.size()));
    // append one '=' for every byte the input falls short of a multiple of 3
for (size_t ix = 0; ix < (psize-sview.size()); ++ix)
outstr.push_back('=');
return outstr;
}
inline std::string from_base64(std::string_view sview)
{
using namespace boost::archive::iterators;
if (sview.empty())
return std::string();
/* MIME-compliant input will have line-breaks, so we have to
* filter WS */
typedef
transform_width<
binary_from_base64<
remove_whitespace<
std::string_view::const_iterator>>
,8,6
> b64_iter;
while (sview.back() == '=')
sview.remove_suffix(1);
std::string outstr(b64_iter(sview.data()),
b64_iter(sview.data() + sview.size()));
return outstr;
}
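  /* Round-trip usage sketch (illustrative, not part of the upstream header):
   *
   *   #include "rgw_b64.h"
   *   #include <cassert>
   *
   *   void demo() {
   *     const std::string plain = "hello world";
   *     // the default wrap_width is large enough to avoid line breaks here
   *     std::string encoded = rgw::to_base64(plain);
   *     std::string decoded = rgw::from_base64(encoded);
   *     assert(decoded == plain);
   *   }
   */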
} /* namespace */
| 2,336 | 26.494118 | 70 | h |
null | ceph-main/src/rgw/rgw_basic_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* N.B., this header defines fundamental serialized types. Do not
* introduce changes or include files which can only be compiled in
* radosgw or OSD contexts (e.g., rgw_sal.h, rgw_common.h)
*/
#pragma once
#include <string>
#include <fmt/format.h>
#include "include/types.h"
#include "rgw_compression_types.h"
#include "rgw_pool_types.h"
#include "rgw_acl_types.h"
#include "rgw_zone_types.h"
#include "rgw_user_types.h"
#include "rgw_bucket_types.h"
#include "rgw_obj_types.h"
#include "rgw_obj_manifest.h"
#include "common/Formatter.h"
class JSONObj;
class cls_user_bucket;
enum RGWIntentEvent {
DEL_OBJ = 0,
DEL_DIR = 1,
};
/** Store error returns for output at a different point in the program */
struct rgw_err {
rgw_err();
void clear();
bool is_clear() const;
bool is_err() const;
friend std::ostream& operator<<(std::ostream& oss, const rgw_err &err);
int http_ret;
int ret;
std::string err_code;
std::string message;
}; /* rgw_err */
struct rgw_zone_id {
std::string id;
rgw_zone_id() {}
rgw_zone_id(const std::string& _id) : id(_id) {}
rgw_zone_id(std::string&& _id) : id(std::move(_id)) {}
void encode(ceph::buffer::list& bl) const {
    /* backward compatibility, not using ENCODE_{START,END} macros */
ceph::encode(id, bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
    /* backward compatibility, not using DECODE_{START,END} macros */
ceph::decode(id, bl);
}
void clear() {
id.clear();
}
bool operator==(const std::string& _id) const {
return (id == _id);
}
bool operator==(const rgw_zone_id& zid) const {
return (id == zid.id);
}
bool operator!=(const rgw_zone_id& zid) const {
return (id != zid.id);
}
bool operator<(const rgw_zone_id& zid) const {
return (id < zid.id);
}
bool operator>(const rgw_zone_id& zid) const {
return (id > zid.id);
}
bool empty() const {
return id.empty();
}
};
WRITE_CLASS_ENCODER(rgw_zone_id)
inline std::ostream& operator<<(std::ostream& os, const rgw_zone_id& zid) {
os << zid.id;
return os;
}
struct obj_version;
struct rgw_placement_rule;
struct RGWAccessKey;
class RGWUserCaps;
extern void encode_json(const char *name, const obj_version& v, Formatter *f);
extern void encode_json(const char *name, const RGWUserCaps& val, Formatter *f);
extern void encode_json(const char *name, const rgw_pool& pool, Formatter *f);
extern void encode_json(const char *name, const rgw_placement_rule& r, Formatter *f);
extern void encode_json_impl(const char *name, const rgw_zone_id& zid, ceph::Formatter *f);
extern void encode_json_plain(const char *name, const RGWAccessKey& val, Formatter *f);
extern void decode_json_obj(obj_version& v, JSONObj *obj);
extern void decode_json_obj(rgw_zone_id& zid, JSONObj *obj);
extern void decode_json_obj(rgw_pool& pool, JSONObj *obj);
extern void decode_json_obj(rgw_placement_rule& v, JSONObj *obj);
// Represents an identity. This is more wide-ranging than a
// 'User'. Its purpose is to be matched against by an
// IdentityApplier. The internal representation will doubtless change as
// more types are added. We may want to expose the type enum and make
// the member public so people can switch/case on it.
namespace rgw {
namespace auth {
class Principal {
enum types { User, Role, Tenant, Wildcard, OidcProvider, AssumedRole };
types t;
rgw_user u;
std::string idp_url;
explicit Principal(types t)
: t(t) {}
Principal(types t, std::string&& n, std::string i)
: t(t), u(std::move(n), std::move(i)) {}
Principal(std::string&& idp_url)
: t(OidcProvider), idp_url(std::move(idp_url)) {}
public:
static Principal wildcard() {
return Principal(Wildcard);
}
static Principal user(std::string&& t, std::string&& u) {
return Principal(User, std::move(t), std::move(u));
}
static Principal role(std::string&& t, std::string&& u) {
return Principal(Role, std::move(t), std::move(u));
}
static Principal tenant(std::string&& t) {
return Principal(Tenant, std::move(t), {});
}
static Principal oidc_provider(std::string&& idp_url) {
return Principal(std::move(idp_url));
}
static Principal assumed_role(std::string&& t, std::string&& u) {
return Principal(AssumedRole, std::move(t), std::move(u));
}
bool is_wildcard() const {
return t == Wildcard;
}
bool is_user() const {
return t == User;
}
bool is_role() const {
return t == Role;
}
bool is_tenant() const {
return t == Tenant;
}
bool is_oidc_provider() const {
return t == OidcProvider;
}
bool is_assumed_role() const {
return t == AssumedRole;
}
const std::string& get_tenant() const {
return u.tenant;
}
const std::string& get_id() const {
return u.id;
}
const std::string& get_idp_url() const {
return idp_url;
}
const std::string& get_role_session() const {
return u.id;
}
const std::string& get_role() const {
return u.id;
}
bool operator ==(const Principal& o) const {
return (t == o.t) && (u == o.u);
}
bool operator <(const Principal& o) const {
return (t < o.t) || ((t == o.t) && (u < o.u));
}
};
std::ostream& operator <<(std::ostream& m, const Principal& p);
}
}
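/* Usage sketch (illustrative, not upstream code): Principal objects are only
 * built through the static factories, e.g. by policy-evaluation code:
 *
 *   using rgw::auth::Principal;
 *   Principal any  = Principal::wildcard();
 *   Principal user = Principal::user("tenant1", "alice");
 *   assert(user.is_user() && user.get_tenant() == "tenant1");
 */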
class JSONObj;
void decode_json_obj(rgw_user& val, JSONObj *obj);
void encode_json(const char *name, const rgw_user& val, ceph::Formatter *f);
void encode_xml(const char *name, const rgw_user& val, ceph::Formatter *f);
inline std::ostream& operator<<(std::ostream& out, const rgw_user &u) {
std::string s;
u.to_str(s);
return out << s;
}
struct RGWUploadPartInfo {
uint32_t num;
uint64_t size;
uint64_t accounted_size{0};
std::string etag;
ceph::real_time modified;
RGWObjManifest manifest;
RGWCompressionInfo cs_info;
// Previous part obj prefixes. Recorded here for later cleanup.
std::set<std::string> past_prefixes;
RGWUploadPartInfo() : num(0), size(0) {}
void encode(bufferlist& bl) const {
ENCODE_START(5, 2, bl);
encode(num, bl);
encode(size, bl);
encode(etag, bl);
encode(modified, bl);
encode(manifest, bl);
encode(cs_info, bl);
encode(accounted_size, bl);
encode(past_prefixes, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(5, 2, 2, bl);
decode(num, bl);
decode(size, bl);
decode(etag, bl);
decode(modified, bl);
if (struct_v >= 3)
decode(manifest, bl);
if (struct_v >= 4) {
decode(cs_info, bl);
decode(accounted_size, bl);
} else {
accounted_size = size;
}
if (struct_v >= 5) {
decode(past_prefixes, bl);
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<RGWUploadPartInfo*>& o);
};
WRITE_CLASS_ENCODER(RGWUploadPartInfo)
| 7,263 | 23.876712 | 91 | h |
null | ceph-main/src/rgw/rgw_bucket.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <memory>
#include <variant>
#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>
#include "include/types.h"
#include "rgw_common.h"
#include "rgw_sal.h"
extern void init_bucket(rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id);
extern int rgw_bucket_parse_bucket_key(CephContext *cct, const std::string& key,
rgw_bucket* bucket, int *shard_id);
extern std::string rgw_make_bucket_entry_name(const std::string& tenant_name,
const std::string& bucket_name);
[[nodiscard]] int rgw_parse_url_bucket(const std::string& bucket,
const std::string& auth_tenant,
std::string &tenant_name,
std::string &bucket_name);
extern int rgw_chown_bucket_and_objects(rgw::sal::Driver* driver,
rgw::sal::Bucket* bucket,
rgw::sal::User* new_user,
const std::string& marker,
std::string *err_msg,
const DoutPrefixProvider *dpp,
optional_yield y);
| 1,295 | 34.027027 | 132 | h |
null | ceph-main/src/rgw/rgw_bucket_encryption.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <include/types.h>
class XMLObj;
class ApplyServerSideEncryptionByDefault
{
std::string kmsMasterKeyID;
std::string sseAlgorithm;
public:
ApplyServerSideEncryptionByDefault() {};
ApplyServerSideEncryptionByDefault(const std::string &algorithm,
const std::string &key_id)
: kmsMasterKeyID(key_id), sseAlgorithm(algorithm) {};
const std::string& kms_master_key_id() const {
return kmsMasterKeyID;
}
const std::string& sse_algorithm() const {
return sseAlgorithm;
}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(kmsMasterKeyID, bl);
encode(sseAlgorithm, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(kmsMasterKeyID, bl);
decode(sseAlgorithm, bl);
DECODE_FINISH(bl);
}
void decode_xml(XMLObj *obj);
void dump_xml(Formatter *f) const;
};
WRITE_CLASS_ENCODER(ApplyServerSideEncryptionByDefault)
class ServerSideEncryptionConfiguration
{
protected:
ApplyServerSideEncryptionByDefault applyServerSideEncryptionByDefault;
bool bucketKeyEnabled;
public:
ServerSideEncryptionConfiguration(): bucketKeyEnabled(false) {};
ServerSideEncryptionConfiguration(const std::string &algorithm,
const std::string &keyid="", bool enabled = false)
: applyServerSideEncryptionByDefault(algorithm, keyid),
bucketKeyEnabled(enabled) {}
const std::string& kms_master_key_id() const {
return applyServerSideEncryptionByDefault.kms_master_key_id();
}
const std::string& sse_algorithm() const {
return applyServerSideEncryptionByDefault.sse_algorithm();
}
bool bucket_key_enabled() const {
return bucketKeyEnabled;
}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(applyServerSideEncryptionByDefault, bl);
encode(bucketKeyEnabled, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(applyServerSideEncryptionByDefault, bl);
decode(bucketKeyEnabled, bl);
DECODE_FINISH(bl);
}
void decode_xml(XMLObj *obj);
void dump_xml(Formatter *f) const;
};
WRITE_CLASS_ENCODER(ServerSideEncryptionConfiguration)
class RGWBucketEncryptionConfig
{
protected:
bool rule_exist;
ServerSideEncryptionConfiguration rule;
public:
RGWBucketEncryptionConfig(): rule_exist(false) {}
RGWBucketEncryptionConfig(const std::string &algorithm,
const std::string &keyid = "", bool enabled = false)
: rule_exist(true), rule(algorithm, keyid, enabled) {}
const std::string& kms_master_key_id() const {
return rule.kms_master_key_id();
}
const std::string& sse_algorithm() const {
return rule.sse_algorithm();
}
bool bucket_key_enabled() const {
return rule.bucket_key_enabled();
}
bool has_rule() const {
return rule_exist;
}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(rule_exist, bl);
if (rule_exist) {
encode(rule, bl);
}
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(rule_exist, bl);
if (rule_exist) {
decode(rule, bl);
}
DECODE_FINISH(bl);
}
void decode_xml(XMLObj *obj);
void dump_xml(Formatter *f) const;
void dump(Formatter *f) const;
static void generate_test_instances(std::list<RGWBucketEncryptionConfig*>& o);
};
WRITE_CLASS_ENCODER(RGWBucketEncryptionConfig)
| 3,562 | 23.916084 | 80 | h |
null | ceph-main/src/rgw/rgw_bucket_layout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* N.B., this header defines fundamental serialized types. Do not
* introduce changes or include files which can only be compiled in
* radosgw or OSD contexts (e.g., rgw_sal.h, rgw_common.h)
*/
#pragma once
#include <optional>
#include <string>
#include "include/encoding.h"
#include "common/ceph_json.h"
namespace rgw {
enum class BucketIndexType : uint8_t {
Normal, // normal hash-based sharded index layout
Indexless, // no bucket index, so listing is unsupported
};
std::string_view to_string(const BucketIndexType& t);
bool parse(std::string_view str, BucketIndexType& t);
void encode_json_impl(const char *name, const BucketIndexType& t, ceph::Formatter *f);
void decode_json_obj(BucketIndexType& t, JSONObj *obj);
inline std::ostream& operator<<(std::ostream& out, const BucketIndexType& t)
{
return out << to_string(t);
}
enum class BucketHashType : uint8_t {
Mod, // rjenkins hash of object name, modulo num_shards
};
std::string_view to_string(const BucketHashType& t);
bool parse(std::string_view str, BucketHashType& t);
void encode_json_impl(const char *name, const BucketHashType& t, ceph::Formatter *f);
void decode_json_obj(BucketHashType& t, JSONObj *obj);
struct bucket_index_normal_layout {
uint32_t num_shards = 1;
BucketHashType hash_type = BucketHashType::Mod;
friend std::ostream& operator<<(std::ostream& out, const bucket_index_normal_layout& l) {
out << "num_shards=" << l.num_shards << ", hash_type=" << to_string(l.hash_type);
return out;
}
};
inline bool operator==(const bucket_index_normal_layout& l,
const bucket_index_normal_layout& r) {
return l.num_shards == r.num_shards
&& l.hash_type == r.hash_type;
}
inline bool operator!=(const bucket_index_normal_layout& l,
const bucket_index_normal_layout& r) {
return !(l == r);
}
void encode(const bucket_index_normal_layout& l, bufferlist& bl, uint64_t f=0);
void decode(bucket_index_normal_layout& l, bufferlist::const_iterator& bl);
void encode_json_impl(const char *name, const bucket_index_normal_layout& l, ceph::Formatter *f);
void decode_json_obj(bucket_index_normal_layout& l, JSONObj *obj);
struct bucket_index_layout {
BucketIndexType type = BucketIndexType::Normal;
// TODO: variant of layout types?
bucket_index_normal_layout normal;
friend std::ostream& operator<<(std::ostream& out, const bucket_index_layout& l) {
out << "type=" << to_string(l.type) << ", normal=" << l.normal;
return out;
}
};
inline bool operator==(const bucket_index_layout& l,
const bucket_index_layout& r) {
return l.type == r.type && l.normal == r.normal;
}
inline bool operator!=(const bucket_index_layout& l,
const bucket_index_layout& r) {
return !(l == r);
}
void encode(const bucket_index_layout& l, bufferlist& bl, uint64_t f=0);
void decode(bucket_index_layout& l, bufferlist::const_iterator& bl);
void encode_json_impl(const char *name, const bucket_index_layout& l, ceph::Formatter *f);
void decode_json_obj(bucket_index_layout& l, JSONObj *obj);
struct bucket_index_layout_generation {
uint64_t gen = 0;
bucket_index_layout layout;
friend std::ostream& operator<<(std::ostream& out, const bucket_index_layout_generation& g) {
out << "gen=" << g.gen;
return out;
}
};
inline bool operator==(const bucket_index_layout_generation& l,
const bucket_index_layout_generation& r) {
return l.gen == r.gen && l.layout == r.layout;
}
inline bool operator!=(const bucket_index_layout_generation& l,
const bucket_index_layout_generation& r) {
return !(l == r);
}
void encode(const bucket_index_layout_generation& l, bufferlist& bl, uint64_t f=0);
void decode(bucket_index_layout_generation& l, bufferlist::const_iterator& bl);
void encode_json_impl(const char *name, const bucket_index_layout_generation& l, ceph::Formatter *f);
void decode_json_obj(bucket_index_layout_generation& l, JSONObj *obj);
enum class BucketLogType : uint8_t {
// colocated with bucket index, so the log layout matches the index layout
InIndex,
};
std::string_view to_string(const BucketLogType& t);
bool parse(std::string_view str, BucketLogType& t);
void encode_json_impl(const char *name, const BucketLogType& t, ceph::Formatter *f);
void decode_json_obj(BucketLogType& t, JSONObj *obj);
inline std::ostream& operator<<(std::ostream& out, const BucketLogType &log_type)
{
switch (log_type) {
case BucketLogType::InIndex:
return out << "InIndex";
default:
return out << "Unknown";
}
}
struct bucket_index_log_layout {
uint64_t gen = 0;
bucket_index_normal_layout layout;
operator bucket_index_layout_generation() const {
bucket_index_layout_generation bilg;
bilg.gen = gen;
bilg.layout.type = BucketIndexType::Normal;
bilg.layout.normal = layout;
return bilg;
}
};
void encode(const bucket_index_log_layout& l, bufferlist& bl, uint64_t f=0);
void decode(bucket_index_log_layout& l, bufferlist::const_iterator& bl);
void encode_json_impl(const char *name, const bucket_index_log_layout& l, ceph::Formatter *f);
void decode_json_obj(bucket_index_log_layout& l, JSONObj *obj);
struct bucket_log_layout {
BucketLogType type = BucketLogType::InIndex;
bucket_index_log_layout in_index;
friend std::ostream& operator<<(std::ostream& out, const bucket_log_layout& l) {
out << "type=" << to_string(l.type);
return out;
}
};
void encode(const bucket_log_layout& l, bufferlist& bl, uint64_t f=0);
void decode(bucket_log_layout& l, bufferlist::const_iterator& bl);
void encode_json_impl(const char *name, const bucket_log_layout& l, ceph::Formatter *f);
void decode_json_obj(bucket_log_layout& l, JSONObj *obj);
struct bucket_log_layout_generation {
uint64_t gen = 0;
bucket_log_layout layout;
friend std::ostream& operator<<(std::ostream& out, const bucket_log_layout_generation& g) {
out << "gen=" << g.gen << ", layout=[ " << g.layout << " ]";
return out;
}
};
void encode(const bucket_log_layout_generation& l, bufferlist& bl, uint64_t f=0);
void decode(bucket_log_layout_generation& l, bufferlist::const_iterator& bl);
void encode_json_impl(const char *name, const bucket_log_layout_generation& l, ceph::Formatter *f);
void decode_json_obj(bucket_log_layout_generation& l, JSONObj *obj);
// return a log layout that shares its layout with the index
inline bucket_log_layout_generation log_layout_from_index(
uint64_t gen, const bucket_index_layout_generation& index)
{
return {gen, {BucketLogType::InIndex, {index.gen, index.layout.normal}}};
}
inline auto matches_gen(uint64_t gen)
{
return [gen] (const bucket_log_layout_generation& l) { return l.gen == gen; };
}
inline bucket_index_layout_generation log_to_index_layout(const bucket_log_layout_generation& log_layout)
{
ceph_assert(log_layout.layout.type == BucketLogType::InIndex);
bucket_index_layout_generation index;
index.gen = log_layout.layout.in_index.gen;
index.layout.normal = log_layout.layout.in_index.layout;
return index;
}
enum class BucketReshardState : uint8_t {
None,
InProgress,
};
std::string_view to_string(const BucketReshardState& s);
bool parse(std::string_view str, BucketReshardState& s);
void encode_json_impl(const char *name, const BucketReshardState& s, ceph::Formatter *f);
void decode_json_obj(BucketReshardState& s, JSONObj *obj);
// describes the layout of bucket index objects
struct BucketLayout {
BucketReshardState resharding = BucketReshardState::None;
// current bucket index layout
bucket_index_layout_generation current_index;
// target index layout of a resharding operation
std::optional<bucket_index_layout_generation> target_index;
// history of untrimmed bucket log layout generations, with the current
// generation at the back()
std::vector<bucket_log_layout_generation> logs;
friend std::ostream& operator<<(std::ostream& out, const BucketLayout& l) {
std::stringstream ss;
if (l.target_index) {
ss << *l.target_index;
} else {
ss << "none";
}
out << "resharding=" << to_string(l.resharding) <<
", current_index=[" << l.current_index << "], target_index=[" <<
ss.str() << "], logs.size()=" << l.logs.size();
return out;
}
};
void encode(const BucketLayout& l, bufferlist& bl, uint64_t f=0);
void decode(BucketLayout& l, bufferlist::const_iterator& bl);
void encode_json_impl(const char *name, const BucketLayout& l, ceph::Formatter *f);
void decode_json_obj(BucketLayout& l, JSONObj *obj);
inline uint32_t num_shards(const bucket_index_normal_layout& index) {
// old buckets used num_shards=0 to mean 1
return index.num_shards > 0 ? index.num_shards : 1;
}
inline uint32_t num_shards(const bucket_index_layout& index) {
ceph_assert(index.type == BucketIndexType::Normal);
return num_shards(index.normal);
}
inline uint32_t num_shards(const bucket_index_layout_generation& index) {
return num_shards(index.layout);
}
inline uint32_t current_num_shards(const BucketLayout& layout) {
return num_shards(layout.current_index);
}
inline bool is_layout_indexless(const bucket_index_layout_generation& layout) {
return layout.layout.type == BucketIndexType::Indexless;
}
} // namespace rgw
| 9,694 | 33.257951 | 105 | h |
null | ceph-main/src/rgw/rgw_bucket_sync_cache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#pragma once
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "common/intrusive_lru.h"
#include "rgw_data_sync.h"
namespace rgw::bucket_sync {
// per bucket-shard state cached by DataSyncShardCR
struct State {
// the source bucket shard to sync
std::pair<rgw_bucket_shard, std::optional<uint64_t>> key;
// current sync obligation being processed by DataSyncSingleEntry
std::optional<rgw_data_sync_obligation> obligation;
// incremented with each new obligation
uint32_t counter = 0;
// highest timestamp applied by all sources
ceph::real_time progress_timestamp;
  State(const std::pair<rgw_bucket_shard, std::optional<uint64_t>>& key) noexcept
: key(key) {}
State(const rgw_bucket_shard& shard, std::optional<uint64_t> gen) noexcept
: key(shard, gen) {}
};
struct Entry;
struct EntryToKey;
class Handle;
using lru_config = ceph::common::intrusive_lru_config<
std::pair<rgw_bucket_shard, std::optional<uint64_t>>, Entry, EntryToKey>;
// a recyclable cache entry
struct Entry : State, ceph::common::intrusive_lru_base<lru_config> {
using State::State;
};
struct EntryToKey {
using type = std::pair<rgw_bucket_shard, std::optional<uint64_t>>;
const type& operator()(const Entry& e) { return e.key; }
};
// use a non-atomic reference count since these aren't shared across threads
template <typename T>
using thread_unsafe_ref_counter = boost::intrusive_ref_counter<
T, boost::thread_unsafe_counter>;
// a state cache for entries within a single datalog shard
class Cache : public thread_unsafe_ref_counter<Cache> {
ceph::common::intrusive_lru<lru_config> cache;
protected:
// protected ctor to enforce the use of factory function create()
explicit Cache(size_t target_size) {
cache.set_target_size(target_size);
}
public:
static boost::intrusive_ptr<Cache> create(size_t target_size) {
return new Cache(target_size);
}
// find or create a cache entry for the given key, and return a Handle that
// keeps it lru-pinned until destruction
Handle get(const rgw_bucket_shard& shard, std::optional<uint64_t> gen);
};
// a State handle that keeps the Cache referenced
class Handle {
boost::intrusive_ptr<Cache> cache;
boost::intrusive_ptr<Entry> entry;
public:
Handle() noexcept = default;
~Handle() = default;
Handle(boost::intrusive_ptr<Cache> cache,
boost::intrusive_ptr<Entry> entry) noexcept
: cache(std::move(cache)), entry(std::move(entry)) {}
Handle(Handle&&) = default;
Handle(const Handle&) = default;
Handle& operator=(Handle&& o) noexcept {
// move the entry first so that its cache stays referenced over destruction
entry = std::move(o.entry);
cache = std::move(o.cache);
return *this;
}
Handle& operator=(const Handle& o) noexcept {
// copy the entry first so that its cache stays referenced over destruction
entry = o.entry;
cache = o.cache;
return *this;
}
explicit operator bool() const noexcept { return static_cast<bool>(entry); }
State& operator*() const noexcept { return *entry; }
State* operator->() const noexcept { return entry.get(); }
};
inline Handle Cache::get(const rgw_bucket_shard& shard, std::optional<uint64_t> gen)
{
auto result = cache.get_or_create({ shard, gen });
return {this, std::move(result.first)};
}
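/* Usage sketch (illustrative, not upstream code): a Cache is created per
 * datalog shard, and a Handle keeps its entry lru-pinned (and the Cache
 * referenced) for as long as the handle lives. Assuming a hypothetical
 * rgw_bucket_shard `bs`:
 *
 *   auto cache = Cache::create(100);          // arbitrary target size
 *   Handle h = cache->get(bs, std::nullopt);  // find or create the entry
 *   h->counter++;                             // state survives later lookups
 *   h->progress_timestamp = ceph::real_clock::now();
 */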
} // namespace rgw::bucket_sync
| 3,750 | 31.059829 | 84 | h |
null | ceph-main/src/rgw/rgw_bucket_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* N.B., this header defines fundamental serialized types. Do not
* include files which can only be compiled in radosgw or OSD
* contexts (e.g., rgw_sal.h, rgw_common.h) */
#pragma once
#include <fmt/format.h>
#include "rgw_pool_types.h"
#include "rgw_user_types.h"
#include "rgw_placement_types.h"
#include "common/dout.h"
#include "common/Formatter.h"
struct cls_user_bucket;
struct rgw_bucket_key {
std::string tenant;
std::string name;
std::string bucket_id;
rgw_bucket_key(const std::string& _tenant,
const std::string& _name,
const std::string& _bucket_id) : tenant(_tenant),
name(_name),
bucket_id(_bucket_id) {}
rgw_bucket_key(const std::string& _tenant,
const std::string& _name) : tenant(_tenant),
name(_name) {}
};
struct rgw_bucket {
std::string tenant;
std::string name;
std::string marker;
std::string bucket_id;
rgw_data_placement_target explicit_placement;
rgw_bucket() { }
// cppcheck-suppress noExplicitConstructor
explicit rgw_bucket(const rgw_user& u, const cls_user_bucket& b);
rgw_bucket(const std::string& _tenant,
const std::string& _name,
const std::string& _bucket_id) : tenant(_tenant),
name(_name),
bucket_id(_bucket_id) {}
rgw_bucket(const rgw_bucket_key& bk) : tenant(bk.tenant),
name(bk.name),
bucket_id(bk.bucket_id) {}
rgw_bucket(const rgw_bucket&) = default;
rgw_bucket(rgw_bucket&&) = default;
bool match(const rgw_bucket& b) const {
return (tenant == b.tenant &&
name == b.name &&
(bucket_id == b.bucket_id ||
bucket_id.empty() ||
b.bucket_id.empty()));
}
void convert(cls_user_bucket *b) const;
void encode(ceph::buffer::list& bl) const {
ENCODE_START(10, 10, bl);
encode(name, bl);
encode(marker, bl);
encode(bucket_id, bl);
encode(tenant, bl);
bool encode_explicit = !explicit_placement.data_pool.empty();
encode(encode_explicit, bl);
if (encode_explicit) {
encode(explicit_placement.data_pool, bl);
encode(explicit_placement.data_extra_pool, bl);
encode(explicit_placement.index_pool, bl);
}
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(10, 3, 3, bl);
decode(name, bl);
if (struct_v < 10) {
decode(explicit_placement.data_pool.name, bl);
}
if (struct_v >= 2) {
decode(marker, bl);
if (struct_v <= 3) {
uint64_t id;
decode(id, bl);
char buf[16];
snprintf(buf, sizeof(buf), "%" PRIu64, id);
bucket_id = buf;
} else {
decode(bucket_id, bl);
}
}
if (struct_v < 10) {
if (struct_v >= 5) {
decode(explicit_placement.index_pool.name, bl);
} else {
explicit_placement.index_pool = explicit_placement.data_pool;
}
if (struct_v >= 7) {
decode(explicit_placement.data_extra_pool.name, bl);
}
}
if (struct_v >= 8) {
decode(tenant, bl);
}
if (struct_v >= 10) {
bool decode_explicit = !explicit_placement.data_pool.empty();
decode(decode_explicit, bl);
if (decode_explicit) {
decode(explicit_placement.data_pool, bl);
decode(explicit_placement.data_extra_pool, bl);
decode(explicit_placement.index_pool, bl);
}
}
DECODE_FINISH(bl);
}
void update_bucket_id(const std::string& new_bucket_id) {
bucket_id = new_bucket_id;
}
// format a key for the bucket/instance. pass delim=0 to skip a field
std::string get_key(char tenant_delim = '/',
char id_delim = ':',
size_t reserve = 0) const;
const rgw_pool& get_data_extra_pool() const {
return explicit_placement.get_data_extra_pool();
}
void dump(ceph::Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<rgw_bucket*>& o);
rgw_bucket& operator=(const rgw_bucket&) = default;
bool operator<(const rgw_bucket& b) const {
if (tenant < b.tenant) {
return true;
} else if (tenant > b.tenant) {
return false;
}
if (name < b.name) {
return true;
} else if (name > b.name) {
return false;
}
return (bucket_id < b.bucket_id);
}
bool operator==(const rgw_bucket& b) const {
return (tenant == b.tenant) && (name == b.name) && \
(bucket_id == b.bucket_id);
}
bool operator!=(const rgw_bucket& b) const {
return (tenant != b.tenant) || (name != b.name) ||
(bucket_id != b.bucket_id);
}
};
WRITE_CLASS_ENCODER(rgw_bucket)
inline std::ostream& operator<<(std::ostream& out, const rgw_bucket &b) {
  out << b.tenant << ":" << b.name << "[" << b.bucket_id << "]";
return out;
}
struct rgw_bucket_placement {
rgw_placement_rule placement_rule;
rgw_bucket bucket;
void dump(Formatter *f) const;
}; /* rgw_bucket_placement */
struct rgw_bucket_shard {
rgw_bucket bucket;
int shard_id;
rgw_bucket_shard() : shard_id(-1) {}
rgw_bucket_shard(const rgw_bucket& _b, int _sid) : bucket(_b), shard_id(_sid) {}
std::string get_key(char tenant_delim = '/', char id_delim = ':',
char shard_delim = ':',
size_t reserve = 0) const;
bool operator<(const rgw_bucket_shard& b) const {
if (bucket < b.bucket) {
return true;
}
if (b.bucket < bucket) {
return false;
}
return shard_id < b.shard_id;
}
bool operator==(const rgw_bucket_shard& b) const {
return (bucket == b.bucket &&
shard_id == b.shard_id);
}
}; /* rgw_bucket_shard */
void encode(const rgw_bucket_shard& b, bufferlist& bl, uint64_t f=0);
void decode(rgw_bucket_shard& b, bufferlist::const_iterator& bl);
inline std::ostream& operator<<(std::ostream& out, const rgw_bucket_shard& bs) {
if (bs.shard_id <= 0) {
return out << bs.bucket;
}
return out << bs.bucket << ":" << bs.shard_id;
}
| 6,688 | 27.58547 | 82 | h |
null | ceph-main/src/rgw/rgw_client_io.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <exception>
#include <string>
#include <string_view>
#include <streambuf>
#include <istream>
#include <stdlib.h>
#include <system_error>
#include "include/types.h"
#include "rgw_common.h"
class RGWRestfulIO;
namespace rgw {
namespace io {
using Exception = std::system_error;
/* The minimal and simplest subset of methods that a client of RadosGW can be
* interacted with. */
class BasicClient {
protected:
virtual int init_env(CephContext *cct) = 0;
public:
virtual ~BasicClient() = default;
/* Initialize the BasicClient and inject CephContext. */
int init(CephContext *cct);
/* Return the RGWEnv describing the environment that a given request lives in.
* The method does not throw exceptions. */
virtual RGWEnv& get_env() noexcept = 0;
/* Complete request.
* On success returns number of bytes generated for a direct client of RadosGW.
* On failure throws rgw::io::Exception containing errno. */
virtual size_t complete_request() = 0;
}; /* rgw::io::Client */
class Accounter {
public:
virtual ~Accounter() = default;
/* Enable or disable the accounting of both sent and received data. Changing
* the state does not affect the counters. */
virtual void set_account(bool enabled) = 0;
/* Return number of bytes sent to a direct client of RadosGW (direct means
* eg. a web server instance in the case of using FastCGI front-end) when
* the accounting was enabled. */
virtual uint64_t get_bytes_sent() const = 0;
/* Return number of bytes received from a direct client of RadosGW (direct
* means eg. a web server instance in the case of using FastCGI front-end)
* when the accounting was enabled. */
virtual uint64_t get_bytes_received() const = 0;
}; /* rgw::io::Accounter */
/* Interface abstracting restful interactions with clients, usually through
* the HTTP protocol. The methods participating in the response generation
* process should be called in the specific order:
* 1. send_100_continue() - at most once,
* 2. send_status() - exactly once,
* 3. Any of:
* a. send_header(),
* b. send_content_length() XOR send_chunked_transfer_encoding()
 *        Please note that exactly one of these two methods must be called,
 *        and at most once.
* 4. complete_header() - exactly once,
* 5. send_body()
* 6. complete_request() - exactly once.
* There are no restrictions on flush() - it may be called in any moment.
*
 * Receiving data from a client isn't subject to any further call order
* restrictions besides those imposed by BasicClient. That is, get_env()
* and recv_body can be mixed. */
class RestfulClient : public BasicClient {
template<typename T> friend class DecoratedRestfulClient;
public:
/* Generate the 100 Continue message.
* On success returns number of bytes generated for a direct client of RadosGW.
* On failure throws rgw::io::Exception containing errno. */
virtual size_t send_100_continue() = 0;
/* Generate the response's status part taking the HTTP status code as @status
* and its name pointed in @status_name.
* On success returns number of bytes generated for a direct client of RadosGW.
* On failure throws rgw::io::Exception containing errno. */
virtual size_t send_status(int status, const char *status_name) = 0;
/* Generate header. On success returns number of bytes generated for a direct
* client of RadosGW. On failure throws rgw::io::Exception containing errno.
*
   * std::string_view is used because it internally carries its length. */
virtual size_t send_header(const std::string_view& name,
const std::string_view& value) = 0;
/* Inform a client about a content length. Takes number of bytes as @len.
* On success returns number of bytes generated for a direct client of RadosGW.
* On failure throws rgw::io::Exception containing errno.
*
* CALL LIMITATIONS:
* - The method must be called EXACTLY ONCE.
* - The method is interchangeable with send_chunked_transfer_encoding(). */
virtual size_t send_content_length(uint64_t len) = 0;
/* Inform a client that the chunked transfer encoding will be used.
* On success returns number of bytes generated for a direct client of RadosGW.
* On failure throws rgw::io::Exception containing errno.
*
* CALL LIMITATIONS:
* - The method must be called EXACTLY ONCE.
* - The method is interchangeable with send_content_length(). */
virtual size_t send_chunked_transfer_encoding() {
/* This is a null implementation. We don't send anything here, even the HTTP
* header. The intended behaviour should be provided through a decorator or
* directly by a given front-end. */
return 0;
}
/* Generate completion (the CRLF sequence separating headers and body in
* the case of HTTP) of headers. On success returns number of generated bytes
* for a direct client of RadosGW. On failure throws rgw::io::Exception with
* errno. */
virtual size_t complete_header() = 0;
/* Receive no more than @max bytes from a request's body and store it in
* buffer pointed by @buf. On success returns number of bytes received from
* a direct client of RadosGW that has been stored in @buf. On failure throws
* rgw::io::Exception containing errno. */
virtual size_t recv_body(char* buf, size_t max) = 0;
/* Generate a part of response's body by taking exactly @len bytes from
* the buffer pointed by @buf. On success returns number of generated bytes
* of response's body. On failure throws rgw::io::Exception. */
virtual size_t send_body(const char* buf, size_t len) = 0;
/* Flushes all already generated data to a direct client of RadosGW.
* On failure throws rgw::io::Exception containing errno. */
virtual void flush() = 0;
} /* rgw::io::RestfulClient */;
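/* Illustrative sketch of the call order described above, as a front-end might
 * drive it; `client` and `body` are hypothetical names and error handling via
 * rgw::io::Exception is omitted:
 *
 *   client.send_status(200, "OK");
 *   client.send_header("Content-Type", "application/xml");
 *   client.send_content_length(body.size()); // or send_chunked_transfer_encoding()
 *   client.complete_header();
 *   client.send_body(body.data(), body.size());
 *   client.complete_request();
 */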
/* Abstract decorator over any implementation of rgw::io::RestfulClient
* which could be provided both as a pointer-to-object or the object itself. */
template <typename DecorateeT>
class DecoratedRestfulClient : public RestfulClient {
template<typename T> friend class DecoratedRestfulClient;
friend RGWRestfulIO;
typedef typename std::remove_pointer<DecorateeT>::type DerefedDecorateeT;
static_assert(std::is_base_of<RestfulClient, DerefedDecorateeT>::value,
"DecorateeT must be a subclass of rgw::io::RestfulClient");
DecorateeT decoratee;
/* There is an indirection layer over accessing decoratee to share the same
* code base between dynamic and static decorators. The difference is about
* what we store internally: pointer to a decorated object versus the whole
* object itself. */
template <typename T = void,
typename std::enable_if<
! std::is_pointer<DecorateeT>::value, T>::type* = nullptr>
DerefedDecorateeT& get_decoratee() {
return decoratee;
}
protected:
template <typename T = void,
typename std::enable_if<
std::is_pointer<DecorateeT>::value, T>::type* = nullptr>
DerefedDecorateeT& get_decoratee() {
return *decoratee;
}
/* Dynamic decorators (those storing a pointer instead of the decorated
* object itself) can be reconfigured on-the-fly. HOWEVER: there are no
* facilities for orchestrating such changes. Callers must take care of
* atomicity and thread-safety. */
template <typename T = void,
typename std::enable_if<
std::is_pointer<DecorateeT>::value, T>::type* = nullptr>
void set_decoratee(DerefedDecorateeT& new_dec) {
decoratee = &new_dec;
}
int init_env(CephContext *cct) override {
return get_decoratee().init_env(cct);
}
public:
explicit DecoratedRestfulClient(DecorateeT&& decoratee)
: decoratee(std::forward<DecorateeT>(decoratee)) {
}
size_t send_status(const int status,
const char* const status_name) override {
return get_decoratee().send_status(status, status_name);
}
size_t send_100_continue() override {
return get_decoratee().send_100_continue();
}
size_t send_header(const std::string_view& name,
const std::string_view& value) override {
return get_decoratee().send_header(name, value);
}
size_t send_content_length(const uint64_t len) override {
return get_decoratee().send_content_length(len);
}
size_t send_chunked_transfer_encoding() override {
return get_decoratee().send_chunked_transfer_encoding();
}
size_t complete_header() override {
return get_decoratee().complete_header();
}
size_t recv_body(char* const buf, const size_t max) override {
return get_decoratee().recv_body(buf, max);
}
size_t send_body(const char* const buf,
const size_t len) override {
return get_decoratee().send_body(buf, len);
}
void flush() override {
return get_decoratee().flush();
}
RGWEnv& get_env() noexcept override {
return get_decoratee().get_env();
}
size_t complete_request() override {
return get_decoratee().complete_request();
}
} /* rgw::io::DecoratedRestfulClient */;
/* Interface that should be provided by a front-end class wanting to use
* the low-level buffering offered by i.e. StaticOutputBufferer. */
class BuffererSink {
public:
virtual ~BuffererSink() = default;
/* Send exactly @len bytes from the memory location pointed by @buf.
* On success returns @len. On failure throws rgw::io::Exception. */
virtual size_t write_data(const char *buf, size_t len) = 0;
};
/* Utility class providing RestfulClient's implementations with facilities
* for low-level buffering without relying on dynamic memory allocations.
* The buffer is carried entirely on stack. This narrows down applicability
* to these situations where buffers are relatively small. This perfectly
* fits the needs of composing an HTTP header. Without that a front-end
* might need to issue a lot of small IO operations leading to increased
 * overhead on syscalls and fragmentation of a message if Nagle's
 * algorithm isn't able to form a single TCP segment (usually when
* running on extremely fast network interfaces like the loopback). */
template <size_t BufferSizeV = 4096>
class StaticOutputBufferer : public std::streambuf {
static_assert(BufferSizeV >= sizeof(std::streambuf::char_type),
"Buffer size must be bigger than a single char_type.");
using std::streambuf::int_type;
int_type overflow(const int_type c) override {
*pptr() = c;
pbump(sizeof(std::streambuf::char_type));
if (! sync()) {
/* No error, the buffer has been successfully synchronized. */
return c;
} else {
return std::streambuf::traits_type::eof();
}
}
int sync() override {
const auto len = static_cast<size_t>(std::streambuf::pptr() -
std::streambuf::pbase());
std::streambuf::pbump(-len);
sink.write_data(std::streambuf::pbase(), len);
/* Always return success here. In case of failure write_data() will throw
* rgw::io::Exception. */
return 0;
}
BuffererSink& sink;
std::streambuf::char_type buffer[BufferSizeV];
public:
explicit StaticOutputBufferer(BuffererSink& sink)
: sink(sink) {
constexpr size_t len = sizeof(buffer) - sizeof(std::streambuf::char_type);
std::streambuf::setp(buffer, buffer + len);
}
};
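/* Illustrative sketch of pairing StaticOutputBufferer with std::ostream to
 * batch small header writes; `ConnSink` is a hypothetical BuffererSink that
 * would forward to the underlying connection:
 *
 *   struct ConnSink : BuffererSink {
 *     size_t write_data(const char* buf, size_t len) override {
 *       // push to the wire; throw rgw::io::Exception on error
 *       return len;
 *     }
 *   } sink;
 *   StaticOutputBufferer<> txbuf(sink);
 *   std::ostream os(&txbuf);
 *   os << "HTTP/1.1 200 OK\r\n";
 *   os.flush(); // sync() hands the whole batch to sink.write_data()
 */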
} /* namespace io */
} /* namespace rgw */
/* We're doing this nasty thing only because of extensive usage of templates
* to implement the static decorator pattern. C++ templates de facto enforce
* mixing interfaces with implementation. Additionally, those classes derive
* from RGWRestfulIO defined here. I believe that including in the middle of
* file is still better than polluting it directly. */
#include "rgw_client_io_filters.h"
/* RGWRestfulIO: high level interface to interact with RESTful clients. What
* differentiates it from rgw::io::RestfulClient is providing more specific APIs
* like rgw::io::Accounter or the AWS Auth v4 stuff implemented by filters
* while hiding the pipelined architecture from clients.
*
* rgw::io::Accounter came in as a part of rgw::io::AccountingFilter. */
class RGWRestfulIO : public rgw::io::AccountingFilter<rgw::io::RestfulClient*> {
std::vector<std::shared_ptr<DecoratedRestfulClient>> filters;
public:
~RGWRestfulIO() override = default;
RGWRestfulIO(CephContext *_cx, rgw::io::RestfulClient* engine)
: AccountingFilter<rgw::io::RestfulClient*>(_cx, std::move(engine)) {
}
void add_filter(std::shared_ptr<DecoratedRestfulClient> new_filter) {
new_filter->set_decoratee(this->get_decoratee());
this->set_decoratee(*new_filter);
filters.emplace_back(std::move(new_filter));
}
}; /* RGWRestfulIO */
/* Type conversions to work around lack of req_state type hierarchy matching
* (e.g.) REST backends (may be replaced w/dynamic typed req_state). */
static inline rgw::io::RestfulClient* RESTFUL_IO(req_state* s) {
ceph_assert(dynamic_cast<rgw::io::RestfulClient*>(s->cio) != nullptr);
return static_cast<rgw::io::RestfulClient*>(s->cio);
}
static inline rgw::io::Accounter* ACCOUNTING_IO(req_state* s) {
auto ptr = dynamic_cast<rgw::io::Accounter*>(s->cio);
ceph_assert(ptr != nullptr);
return ptr;
}
static inline RGWRestfulIO* AWS_AUTHv4_IO(const req_state* const s) {
ceph_assert(dynamic_cast<RGWRestfulIO*>(s->cio) != nullptr);
return static_cast<RGWRestfulIO*>(s->cio);
}
class RGWClientIOStreamBuf : public std::streambuf {
protected:
RGWRestfulIO &rio;
size_t const window_size;
size_t const putback_size;
std::vector<char> buffer;
public:
RGWClientIOStreamBuf(RGWRestfulIO &rio, size_t ws, size_t ps = 1)
: rio(rio),
window_size(ws),
putback_size(ps),
buffer(ws + ps)
{
setg(nullptr, nullptr, nullptr);
}
std::streambuf::int_type underflow() override {
if (gptr() < egptr()) {
return traits_type::to_int_type(*gptr());
}
char * const base = buffer.data();
char * start;
if (nullptr != eback()) {
      /* Preserve the putback area. Only on the first underflow (when eback()
       * is still nullptr) is there no previous data to preserve for unget()
       * or something similar. */
std::memmove(base, egptr() - putback_size, putback_size);
start = base + putback_size;
} else {
start = base;
}
size_t read_len = 0;
try {
read_len = rio.recv_body(base, window_size);
} catch (rgw::io::Exception&) {
return traits_type::eof();
}
if (0 == read_len) {
return traits_type::eof();
}
setg(base, start, start + read_len);
return traits_type::to_int_type(*gptr());
}
};
class RGWClientIOStream : private RGWClientIOStreamBuf, public std::istream {
  /* Inheritance from RGWClientIOStreamBuf is a kind of shadow, indirect
* form of composition here. We cannot do that explicitly because istream
* ctor is being called prior to construction of any member of this class. */
public:
explicit RGWClientIOStream(RGWRestfulIO &s)
: RGWClientIOStreamBuf(s, 1, 2),
std::istream(static_cast<RGWClientIOStreamBuf *>(this)) {
}
};
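/* Illustrative sketch of consuming a request body through the std::istream
 * interface; `rio` is a hypothetical RGWRestfulIO reference:
 *
 *   RGWClientIOStream in(rio);
 *   std::string line;
 *   while (std::getline(in, line)) {
 *     // process one line of the request body
 *   }
 */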
| 15,240 | 33.956422 | 81 | h |
null | ceph-main/src/rgw/rgw_client_io_filters.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <type_traits>
#include <boost/optional.hpp>
#include "rgw_common.h"
#include "rgw_client_io.h"
namespace rgw {
namespace io {
template <typename T>
class AccountingFilter : public DecoratedRestfulClient<T>,
public Accounter {
bool enabled;
uint64_t total_sent;
uint64_t total_received;
CephContext *cct;
public:
template <typename U>
AccountingFilter(CephContext *cct, U&& decoratee)
: DecoratedRestfulClient<T>(std::forward<U>(decoratee)),
enabled(false),
total_sent(0),
total_received(0), cct(cct) {
}
size_t send_status(const int status,
const char* const status_name) override {
const auto sent = DecoratedRestfulClient<T>::send_status(status,
status_name);
lsubdout(cct, rgw, 30) << "AccountingFilter::send_status: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
size_t send_100_continue() override {
const auto sent = DecoratedRestfulClient<T>::send_100_continue();
lsubdout(cct, rgw, 30) << "AccountingFilter::send_100_continue: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
size_t send_header(const std::string_view& name,
const std::string_view& value) override {
const auto sent = DecoratedRestfulClient<T>::send_header(name, value);
lsubdout(cct, rgw, 30) << "AccountingFilter::send_header: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
size_t send_content_length(const uint64_t len) override {
const auto sent = DecoratedRestfulClient<T>::send_content_length(len);
lsubdout(cct, rgw, 30) << "AccountingFilter::send_content_length: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
size_t send_chunked_transfer_encoding() override {
const auto sent = DecoratedRestfulClient<T>::send_chunked_transfer_encoding();
lsubdout(cct, rgw, 30) << "AccountingFilter::send_chunked_transfer_encoding: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
size_t complete_header() override {
const auto sent = DecoratedRestfulClient<T>::complete_header();
lsubdout(cct, rgw, 30) << "AccountingFilter::complete_header: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
size_t recv_body(char* buf, size_t max) override {
const auto received = DecoratedRestfulClient<T>::recv_body(buf, max);
lsubdout(cct, rgw, 30) << "AccountingFilter::recv_body: e="
<< (enabled ? "1" : "0") << ", received=" << received << dendl;
if (enabled) {
total_received += received;
}
return received;
}
size_t send_body(const char* const buf,
const size_t len) override {
const auto sent = DecoratedRestfulClient<T>::send_body(buf, len);
lsubdout(cct, rgw, 30) << "AccountingFilter::send_body: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
size_t complete_request() override {
const auto sent = DecoratedRestfulClient<T>::complete_request();
lsubdout(cct, rgw, 30) << "AccountingFilter::complete_request: e="
<< (enabled ? "1" : "0") << ", sent=" << sent << ", total="
<< total_sent << dendl;
if (enabled) {
total_sent += sent;
}
return sent;
}
uint64_t get_bytes_sent() const override {
return total_sent;
}
uint64_t get_bytes_received() const override {
return total_received;
}
void set_account(bool enabled) override {
this->enabled = enabled;
lsubdout(cct, rgw, 30) << "AccountingFilter::set_account: e="
<< (enabled ? "1" : "0") << dendl;
}
};
/* Filter for in-memory buffering of incoming data and calculating the content
* length header if it isn't present. */
template <typename T>
class BufferingFilter : public DecoratedRestfulClient<T> {
template<typename Td> friend class DecoratedRestfulClient;
protected:
ceph::bufferlist data;
bool has_content_length;
bool buffer_data;
CephContext *cct;
public:
template <typename U>
BufferingFilter(CephContext *cct, U&& decoratee)
: DecoratedRestfulClient<T>(std::forward<U>(decoratee)),
has_content_length(false),
buffer_data(false), cct(cct) {
}
size_t send_content_length(const uint64_t len) override;
size_t send_chunked_transfer_encoding() override;
size_t complete_header() override;
size_t send_body(const char* buf, size_t len) override;
size_t complete_request() override;
};
template <typename T>
size_t BufferingFilter<T>::send_body(const char* const buf,
const size_t len)
{
if (buffer_data) {
data.append(buf, len);
lsubdout(cct, rgw, 30) << "BufferingFilter<T>::send_body: defer count = "
<< len << dendl;
return 0;
}
return DecoratedRestfulClient<T>::send_body(buf, len);
}
template <typename T>
size_t BufferingFilter<T>::send_content_length(const uint64_t len)
{
has_content_length = true;
return DecoratedRestfulClient<T>::send_content_length(len);
}
template <typename T>
size_t BufferingFilter<T>::send_chunked_transfer_encoding()
{
has_content_length = true;
return DecoratedRestfulClient<T>::send_chunked_transfer_encoding();
}
template <typename T>
size_t BufferingFilter<T>::complete_header()
{
if (! has_content_length) {
/* We will dump everything in complete_request(). */
buffer_data = true;
lsubdout(cct, rgw, 30) << "BufferingFilter<T>::complete_header: has_content_length="
<< (has_content_length ? "1" : "0") << dendl;
return 0;
}
return DecoratedRestfulClient<T>::complete_header();
}
template <typename T>
size_t BufferingFilter<T>::complete_request()
{
size_t sent = 0;
if (! has_content_length) {
/* It is not correct to count these bytes here,
* because they can only be part of the header.
* Therefore force count to 0.
*/
sent += DecoratedRestfulClient<T>::send_content_length(data.length());
sent += DecoratedRestfulClient<T>::complete_header();
lsubdout(cct, rgw, 30) <<
"BufferingFilter::complete_request: !has_content_length: IGNORE: sent="
<< sent << dendl;
sent = 0;
}
if (buffer_data) {
/* We are sending each buffer separately to avoid extra memory shuffling
* that would occur on data.c_str() to provide a continuous memory area. */
for (const auto& ptr : data.buffers()) {
sent += DecoratedRestfulClient<T>::send_body(ptr.c_str(),
ptr.length());
}
data.clear();
buffer_data = false;
lsubdout(cct, rgw, 30) << "BufferingFilter::complete_request: buffer_data: sent="
<< sent << dendl;
}
return sent + DecoratedRestfulClient<T>::complete_request();
}
template <typename T> static inline
BufferingFilter<T> add_buffering(
CephContext *cct,
T&& t) {
return BufferingFilter<T>(cct, std::forward<T>(t));
}
template <typename T>
class ChunkingFilter : public DecoratedRestfulClient<T> {
template<typename Td> friend class DecoratedRestfulClient;
protected:
bool chunking_enabled;
public:
template <typename U>
explicit ChunkingFilter(U&& decoratee)
: DecoratedRestfulClient<T>(std::forward<U>(decoratee)),
chunking_enabled(false) {
}
size_t send_chunked_transfer_encoding() override {
chunking_enabled = true;
return DecoratedRestfulClient<T>::send_header("Transfer-Encoding",
"chunked");
}
size_t send_body(const char* buf,
const size_t len) override {
if (! chunking_enabled) {
return DecoratedRestfulClient<T>::send_body(buf, len);
} else {
static constexpr char HEADER_END[] = "\r\n";
/* https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1 */
// TODO: we have no support for sending chunked-encoding
// extensions/trailing headers.
char chunk_size[32];
const auto chunk_size_len = snprintf(chunk_size, sizeof(chunk_size),
"%zx\r\n", len);
size_t sent = 0;
sent += DecoratedRestfulClient<T>::send_body(chunk_size, chunk_size_len);
sent += DecoratedRestfulClient<T>::send_body(buf, len);
sent += DecoratedRestfulClient<T>::send_body(HEADER_END,
sizeof(HEADER_END) - 1);
return sent;
}
}
size_t complete_request() override {
size_t sent = 0;
if (chunking_enabled) {
static constexpr char CHUNKED_RESP_END[] = "0\r\n\r\n";
sent += DecoratedRestfulClient<T>::send_body(CHUNKED_RESP_END,
sizeof(CHUNKED_RESP_END) - 1);
}
return sent + DecoratedRestfulClient<T>::complete_request();
}
};
template <typename T> static inline
ChunkingFilter<T> add_chunking(T&& t) {
return ChunkingFilter<T>(std::forward<T>(t));
}
/* Class that controls and inhibits the process of sending the Content-Length HTTP
 * header where RFC 7230 requires it. The cases worth our attention are 204 No
* Content as well as 304 Not Modified. */
template <typename T>
class ConLenControllingFilter : public DecoratedRestfulClient<T> {
protected:
enum class ContentLengthAction {
FORWARD,
INHIBIT,
UNKNOWN
} action;
public:
template <typename U>
explicit ConLenControllingFilter(U&& decoratee)
: DecoratedRestfulClient<T>(std::forward<U>(decoratee)),
action(ContentLengthAction::UNKNOWN) {
}
size_t send_status(const int status,
const char* const status_name) override {
if ((204 == status || 304 == status) &&
! g_conf()->rgw_print_prohibited_content_length) {
action = ContentLengthAction::INHIBIT;
} else {
action = ContentLengthAction::FORWARD;
}
return DecoratedRestfulClient<T>::send_status(status, status_name);
}
size_t send_content_length(const uint64_t len) override {
switch(action) {
case ContentLengthAction::FORWARD:
return DecoratedRestfulClient<T>::send_content_length(len);
case ContentLengthAction::INHIBIT:
return 0;
case ContentLengthAction::UNKNOWN:
default:
return -EINVAL;
}
}
};
template <typename T> static inline
ConLenControllingFilter<T> add_conlen_controlling(T&& t) {
return ConLenControllingFilter<T>(std::forward<T>(t));
}
/* Filter that rectifies the wrong behaviour of some clients of the RGWRestfulIO
* interface. Should be removed after fixing those clients. */
template <typename T>
class ReorderingFilter : public DecoratedRestfulClient<T> {
protected:
enum class ReorderState {
RGW_EARLY_HEADERS, /* Got headers sent before calling send_status. */
RGW_STATUS_SEEN, /* Status has been seen. */
RGW_DATA /* Header has been completed. */
} phase;
boost::optional<uint64_t> content_length;
std::vector<std::pair<std::string, std::string>> headers;
size_t send_header(const std::string_view& name,
const std::string_view& value) override {
switch (phase) {
case ReorderState::RGW_EARLY_HEADERS:
case ReorderState::RGW_STATUS_SEEN:
headers.emplace_back(std::make_pair(std::string(name.data(), name.size()),
std::string(value.data(), value.size())));
return 0;
case ReorderState::RGW_DATA:
return DecoratedRestfulClient<T>::send_header(name, value);
}
return -EIO;
}
public:
template <typename U>
explicit ReorderingFilter(U&& decoratee)
: DecoratedRestfulClient<T>(std::forward<U>(decoratee)),
phase(ReorderState::RGW_EARLY_HEADERS) {
}
size_t send_status(const int status,
const char* const status_name) override {
phase = ReorderState::RGW_STATUS_SEEN;
return DecoratedRestfulClient<T>::send_status(status, status_name);
}
size_t send_content_length(const uint64_t len) override {
if (ReorderState::RGW_EARLY_HEADERS == phase) {
/* Oh great, someone tries to send content length before status. */
content_length = len;
return 0;
} else {
return DecoratedRestfulClient<T>::send_content_length(len);
}
}
size_t complete_header() override {
size_t sent = 0;
/* Change state in order to immediately send everything we get. */
phase = ReorderState::RGW_DATA;
    /* Send content length if necessary. */
if (content_length) {
sent += DecoratedRestfulClient<T>::send_content_length(*content_length);
}
/* Header data in buffers are already counted. */
for (const auto& kv : headers) {
sent += DecoratedRestfulClient<T>::send_header(kv.first, kv.second);
}
headers.clear();
return sent + DecoratedRestfulClient<T>::complete_header();
}
};
template <typename T> static inline
ReorderingFilter<T> add_reordering(T&& t) {
return ReorderingFilter<T>(std::forward<T>(t));
}
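/* Illustrative sketch of how the add_* helpers are meant to be nested so the
 * static decorators wrap a concrete RestfulClient; `cct` and `real_client`
 * are hypothetical and the exact stack chosen by a given front-end may differ:
 *
 *   auto filtered = add_reordering(
 *                     add_buffering(cct,
 *                       add_chunking(
 *                         add_conlen_controlling(&real_client))));
 */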
} /* namespace io */
} /* namespace rgw */
| 13,842 | 29.424176 | 88 | h |
null | ceph-main/src/rgw/rgw_compression.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <vector>
#include "compressor/Compressor.h"
#include "rgw_putobj.h"
#include "rgw_op.h"
#include "rgw_compression_types.h"
int rgw_compression_info_from_attr(const bufferlist& attr,
bool& need_decompress,
RGWCompressionInfo& cs_info);
int rgw_compression_info_from_attrset(const std::map<std::string, bufferlist>& attrs,
bool& need_decompress,
RGWCompressionInfo& cs_info);
class RGWGetObj_Decompress : public RGWGetObj_Filter
{
CephContext* cct;
CompressorRef compressor;
RGWCompressionInfo* cs_info;
bool partial_content;
std::vector<compression_block>::iterator first_block, last_block;
off_t q_ofs, q_len;
uint64_t cur_ofs;
bufferlist waiting;
public:
RGWGetObj_Decompress(CephContext* cct_,
RGWCompressionInfo* cs_info_,
bool partial_content_,
RGWGetObj_Filter* next);
virtual ~RGWGetObj_Decompress() override {}
int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override;
int fixup_range(off_t& ofs, off_t& end) override;
};
class RGWPutObj_Compress : public rgw::putobj::Pipe
{
CephContext* cct;
bool compressed{false};
CompressorRef compressor;
std::optional<int32_t> compressor_message;
std::vector<compression_block> blocks;
uint64_t compressed_ofs{0};
public:
RGWPutObj_Compress(CephContext* cct_, CompressorRef compressor,
rgw::sal::DataProcessor *next)
: Pipe(next), cct(cct_), compressor(compressor) {}
virtual ~RGWPutObj_Compress() override {};
int process(bufferlist&& data, uint64_t logical_offset) override;
bool is_compressed() { return compressed; }
std::vector<compression_block>& get_compression_blocks() { return blocks; }
std::optional<int32_t> get_compressor_message() { return compressor_message; }
}; /* RGWPutObj_Compress */
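/* Illustrative sketch of placing RGWPutObj_Compress in a put pipeline; the
 * "zstd" plugin name and the `next` processor are assumptions, not a fixed
 * requirement of this interface:
 *
 *   CompressorRef plugin = Compressor::create(cct, "zstd");
 *   RGWPutObj_Compress compressor(cct, plugin, next);
 *   compressor.process(std::move(data), logical_offset);
 *   // afterwards: is_compressed(), get_compression_blocks() and
 *   // get_compressor_message() feed the RGWCompressionInfo attrs
 */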
| 2,091 | 32.206349 | 85 | h |
null | ceph-main/src/rgw/rgw_compression_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
struct compression_block {
uint64_t old_ofs;
uint64_t new_ofs;
uint64_t len;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(old_ofs, bl);
encode(new_ofs, bl);
encode(len, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(old_ofs, bl);
decode(new_ofs, bl);
decode(len, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(compression_block)
struct RGWCompressionInfo {
std::string compression_type;
uint64_t orig_size;
std::optional<int32_t> compressor_message;
std::vector<compression_block> blocks;
RGWCompressionInfo() : compression_type("none"), orig_size(0) {}
RGWCompressionInfo(const RGWCompressionInfo& cs_info) : compression_type(cs_info.compression_type),
orig_size(cs_info.orig_size),
compressor_message(cs_info.compressor_message),
blocks(cs_info.blocks) {}
void encode(bufferlist& bl) const {
ENCODE_START(2, 1, bl);
encode(compression_type, bl);
encode(orig_size, bl);
encode(compressor_message, bl);
encode(blocks, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(2, bl);
decode(compression_type, bl);
decode(orig_size, bl);
if (struct_v >= 2) {
decode(compressor_message, bl);
}
decode(blocks, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(RGWCompressionInfo)
| 2,050 | 25.636364 | 101 | h |
null | ceph-main/src/rgw/rgw_cors.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <map>
#include <string>
#include <include/types.h>
#define RGW_CORS_GET 0x1
#define RGW_CORS_PUT 0x2
#define RGW_CORS_HEAD 0x4
#define RGW_CORS_POST 0x8
#define RGW_CORS_DELETE 0x10
#define RGW_CORS_COPY 0x20
#define RGW_CORS_ALL (RGW_CORS_GET | \
RGW_CORS_PUT | \
RGW_CORS_HEAD | \
RGW_CORS_POST | \
RGW_CORS_DELETE | \
RGW_CORS_COPY)
#define CORS_MAX_AGE_INVALID ((uint32_t)-1)
class RGWCORSRule
{
protected:
uint32_t max_age;
uint8_t allowed_methods;
std::string id;
std::set<std::string> allowed_hdrs; /* If you change this, you need to discard lowercase_allowed_hdrs */
std::set<std::string> lowercase_allowed_hdrs; /* Not built until needed in RGWCORSRule::is_header_allowed */
std::set<std::string> allowed_origins;
std::list<std::string> exposable_hdrs;
public:
RGWCORSRule() : max_age(CORS_MAX_AGE_INVALID),allowed_methods(0) {}
RGWCORSRule(std::set<std::string>& o, std::set<std::string>& h,
std::list<std::string>& e, uint8_t f, uint32_t a)
:max_age(a),
allowed_methods(f),
allowed_hdrs(h),
allowed_origins(o),
exposable_hdrs(e) {}
virtual ~RGWCORSRule() {}
std::string& get_id() { return id; }
uint32_t get_max_age() { return max_age; }
uint8_t get_allowed_methods() { return allowed_methods; }
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(max_age, bl);
encode(allowed_methods, bl);
encode(id, bl);
encode(allowed_hdrs, bl);
encode(allowed_origins, bl);
encode(exposable_hdrs, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(max_age, bl);
decode(allowed_methods, bl);
decode(id, bl);
decode(allowed_hdrs, bl);
decode(allowed_origins, bl);
decode(exposable_hdrs, bl);
DECODE_FINISH(bl);
}
bool has_wildcard_origin();
bool is_origin_present(const char *o);
void format_exp_headers(std::string& s);
void erase_origin_if_present(std::string& origin, bool *rule_empty);
void dump_origins();
void dump(Formatter *f) const;
bool is_header_allowed(const char *hdr, size_t len);
};
WRITE_CLASS_ENCODER(RGWCORSRule)
class RGWCORSConfiguration
{
protected:
std::list<RGWCORSRule> rules;
public:
RGWCORSConfiguration() {}
~RGWCORSConfiguration() {}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(rules, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(rules, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
std::list<RGWCORSRule>& get_rules() {
return rules;
}
bool is_empty() {
return rules.empty();
}
void get_origins_list(const char *origin, std::list<std::string>& origins);
RGWCORSRule * host_name_rule(const char *origin);
void erase_host_name_rule(std::string& origin);
void dump();
void stack_rule(RGWCORSRule& r) {
rules.push_front(r);
}
};
WRITE_CLASS_ENCODER(RGWCORSConfiguration)
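/* Illustrative sketch of building a rule and stacking it onto a configuration;
 * the origin, header and max-age values are hypothetical:
 *
 *   std::set<std::string> origins = {"https://example.com"};
 *   std::set<std::string> hdrs = {"x-custom-header"};
 *   std::list<std::string> expose = {"ETag"};
 *   RGWCORSRule rule(origins, hdrs, expose, RGW_CORS_GET | RGW_CORS_HEAD, 3600);
 *   RGWCORSConfiguration conf;
 *   conf.stack_rule(rule);
 */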
static inline int validate_name_string(std::string_view o) {
if (o.length() == 0)
return -1;
if (o.find_first_of("*") != o.find_last_of("*"))
return -1;
return 0;
}
| 3,817 | 27.281481 | 110 | h |
null | ceph-main/src/rgw/rgw_cors_s3.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <map>
#include <string>
#include <iosfwd>
#include <include/types.h>
#include <common/Formatter.h>
#include <common/dout.h>
#include "rgw_xml.h"
#include "rgw_cors.h"
class RGWCORSRule_S3 : public RGWCORSRule, public XMLObj
{
const DoutPrefixProvider *dpp;
public:
RGWCORSRule_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {}
~RGWCORSRule_S3() override {}
bool xml_end(const char *el) override;
void to_xml(XMLFormatter& f);
};
class RGWCORSConfiguration_S3 : public RGWCORSConfiguration, public XMLObj
{
const DoutPrefixProvider *dpp;
public:
RGWCORSConfiguration_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {}
~RGWCORSConfiguration_S3() override {}
bool xml_end(const char *el) override;
void to_xml(std::ostream& out);
};
class RGWCORSXMLParser_S3 : public RGWXMLParser
{
const DoutPrefixProvider *dpp;
CephContext *cct;
XMLObj *alloc_obj(const char *el) override;
public:
explicit RGWCORSXMLParser_S3(const DoutPrefixProvider *_dpp, CephContext *_cct) : dpp(_dpp), cct(_cct) {}
};
| 1,504 | 24.508475 | 107 | h |
null | ceph-main/src/rgw/rgw_cors_swift.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <map>
#include <string>
#include <vector>
#include <include/types.h>
#include <include/str_list.h>
#include "rgw_cors.h"
class RGWCORSConfiguration_SWIFT : public RGWCORSConfiguration
{
public:
RGWCORSConfiguration_SWIFT() {}
~RGWCORSConfiguration_SWIFT() {}
int create_update(const char *allow_origins, const char *allow_headers,
const char *expose_headers, const char *max_age) {
std::set<std::string> o, h;
std::list<std::string> e;
unsigned long a = CORS_MAX_AGE_INVALID;
uint8_t flags = RGW_CORS_ALL;
int nr_invalid_names = 0;
auto add_host = [&nr_invalid_names, &o] (auto host) {
if (validate_name_string(host) == 0) {
o.emplace(std::string{host});
} else {
nr_invalid_names++;
}
};
for_each_substr(allow_origins, ";,= \t", add_host);
if (o.empty() || nr_invalid_names > 0) {
return -EINVAL;
}
if (allow_headers) {
int nr_invalid_headers = 0;
auto add_header = [&nr_invalid_headers, &h] (auto allow_header) {
if (validate_name_string(allow_header) == 0) {
h.emplace(std::string{allow_header});
} else {
nr_invalid_headers++;
}
};
for_each_substr(allow_headers, ";,= \t", add_header);
if (h.empty() || nr_invalid_headers > 0) {
return -EINVAL;
}
}
if (expose_headers) {
for_each_substr(expose_headers, ";,= \t",
[&e] (auto expose_header) {
e.emplace_back(std::string(expose_header));
});
}
if (max_age) {
char *end = NULL;
a = strtoul(max_age, &end, 10);
if (a == ULONG_MAX)
a = CORS_MAX_AGE_INVALID;
}
RGWCORSRule rule(o, h, e, flags, a);
stack_rule(rule);
return 0;
}
};
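/* Illustrative sketch of create_update() input, which accepts ";,= \t"-separated
 * lists; the values are hypothetical:
 *
 *   RGWCORSConfiguration_SWIFT cfg;
 *   int r = cfg.create_update("https://a.example;https://b.example",
 *                             "content-type,x-object-meta-color",
 *                             "etag,x-trans-id",
 *                             "600");
 *   // r is -EINVAL if the origin (or header) list is empty or invalid
 */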
| 2,339 | 26.857143 | 76 | h |
null | ceph-main/src/rgw/rgw_cr_rest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <mutex>
#include "include/ceph_assert.h" // boost header clobbers our assert.h
#include "rgw_coroutine.h"
#include "rgw_rest_conn.h"
struct rgw_rest_obj {
rgw_obj_key key;
uint64_t content_len;
std::map<std::string, std::string> attrs;
std::map<std::string, std::string> custom_attrs;
RGWAccessControlPolicy acls;
void init(const rgw_obj_key& _key) {
key = _key;
}
};
class RGWReadRawRESTResourceCR : public RGWSimpleCoroutine {
bufferlist *result;
protected:
RGWRESTConn *conn;
RGWHTTPManager *http_manager;
std::string path;
param_vec_t params;
param_vec_t extra_headers;
public:
boost::intrusive_ptr<RGWRESTReadResource> http_op;
RGWReadRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager, const std::string& _path,
rgw_http_param_pair *params, bufferlist *_result)
: RGWSimpleCoroutine(_cct), result(_result), conn(_conn), http_manager(_http_manager),
path(_path), params(make_param_list(params))
{}
RGWReadRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager, const std::string& _path,
rgw_http_param_pair *params)
: RGWSimpleCoroutine(_cct), conn(_conn), http_manager(_http_manager),
path(_path), params(make_param_list(params))
{}
RGWReadRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager, const std::string& _path,
rgw_http_param_pair *params, param_vec_t &hdrs)
: RGWSimpleCoroutine(_cct), conn(_conn), http_manager(_http_manager),
path(_path), params(make_param_list(params)),
extra_headers(hdrs)
{}
RGWReadRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager, const std::string& _path,
rgw_http_param_pair *params,
std::map <std::string, std::string> *hdrs)
: RGWSimpleCoroutine(_cct), conn(_conn), http_manager(_http_manager),
path(_path), params(make_param_list(params)),
extra_headers(make_param_list(hdrs))
{}
~RGWReadRawRESTResourceCR() override {
request_cleanup();
}
int send_request(const DoutPrefixProvider *dpp) override {
auto op = boost::intrusive_ptr<RGWRESTReadResource>(
new RGWRESTReadResource(conn, path, params, &extra_headers, http_manager));
init_new_io(op.get());
int ret = op->aio_read(dpp);
if (ret < 0) {
log_error() << "failed to send http operation: " << op->to_str()
<< " ret=" << ret << std::endl;
op->put();
return ret;
}
std::swap(http_op, op); // store reference in http_op on success
return 0;
}
virtual int wait_result() {
return http_op->wait(result, null_yield);
}
int request_complete() override {
int ret;
ret = wait_result();
auto op = std::move(http_op); // release ref on return
if (ret < 0) {
error_stream << "http operation failed: " << op->to_str()
<< " status=" << op->get_http_status() << std::endl;
op->put();
return ret;
}
op->put();
return 0;
}
void request_cleanup() override {
if (http_op) {
http_op->put();
http_op = NULL;
}
}
};
template <class T>
class RGWReadRESTResourceCR : public RGWReadRawRESTResourceCR {
T *result;
public:
RGWReadRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager, const std::string& _path,
rgw_http_param_pair *params, T *_result)
: RGWReadRawRESTResourceCR(_cct, _conn, _http_manager, _path, params), result(_result)
{}
RGWReadRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager, const std::string& _path,
rgw_http_param_pair *params,
std::map <std::string, std::string> *hdrs,
T *_result)
: RGWReadRawRESTResourceCR(_cct, _conn, _http_manager, _path, params, hdrs), result(_result)
{}
int wait_result() override {
return http_op->wait(result, null_yield);
}
};
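/* Illustrative sketch of calling this coroutine from a parent RGWCoroutine;
 * the path, params and MyInfoType result type are hypothetical:
 *
 *   rgw_http_param_pair params[] = {{"type", "metadata"}, {nullptr, nullptr}};
 *   yield call(new RGWReadRESTResourceCR<MyInfoType>(cct, conn, http_manager,
 *                                                    "/admin/log", params, &info));
 *   if (retcode < 0) {
 *     return set_cr_error(retcode);
 *   }
 */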
template <class T, class E = int>
class RGWSendRawRESTResourceCR: public RGWSimpleCoroutine {
protected:
RGWRESTConn *conn;
RGWHTTPManager *http_manager;
std::string method;
std::string path;
param_vec_t params;
param_vec_t headers;
std::map<std::string, std::string> *attrs;
T *result;
E *err_result;
bufferlist input_bl;
bool send_content_length=false;
boost::intrusive_ptr<RGWRESTSendResource> http_op;
public:
RGWSendRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _method, const std::string& _path,
rgw_http_param_pair *_params,
std::map<std::string, std::string> *_attrs,
bufferlist& _input, T *_result,
bool _send_content_length,
E *_err_result = nullptr)
: RGWSimpleCoroutine(_cct), conn(_conn), http_manager(_http_manager),
method(_method), path(_path), params(make_param_list(_params)),
headers(make_param_list(_attrs)), attrs(_attrs),
result(_result), err_result(_err_result),
input_bl(_input), send_content_length(_send_content_length) {}
RGWSendRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _method, const std::string& _path,
rgw_http_param_pair *_params, std::map<std::string, std::string> *_attrs,
T *_result, E *_err_result = nullptr)
: RGWSimpleCoroutine(_cct), conn(_conn), http_manager(_http_manager),
method(_method), path(_path), params(make_param_list(_params)), headers(make_param_list(_attrs)), attrs(_attrs), result(_result),
err_result(_err_result) {}
~RGWSendRawRESTResourceCR() override {
request_cleanup();
}
int send_request(const DoutPrefixProvider *dpp) override {
auto op = boost::intrusive_ptr<RGWRESTSendResource>(
new RGWRESTSendResource(conn, method, path, params, &headers, http_manager));
init_new_io(op.get());
int ret = op->aio_send(dpp, input_bl);
if (ret < 0) {
ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send request" << dendl;
op->put();
return ret;
}
std::swap(http_op, op); // store reference in http_op on success
return 0;
}
int request_complete() override {
int ret;
if (result || err_result) {
ret = http_op->wait(result, null_yield, err_result);
} else {
bufferlist bl;
ret = http_op->wait(&bl, null_yield);
}
auto op = std::move(http_op); // release ref on return
if (ret < 0) {
error_stream << "http operation failed: " << op->to_str()
<< " status=" << op->get_http_status() << std::endl;
lsubdout(cct, rgw, 5) << "failed to wait for op, ret=" << ret
<< ": " << op->to_str() << dendl;
op->put();
return ret;
}
op->put();
return 0;
}
void request_cleanup() override {
if (http_op) {
http_op->put();
http_op = NULL;
}
}
};
template <class S, class T, class E = int>
class RGWSendRESTResourceCR : public RGWSendRawRESTResourceCR<T, E> {
public:
RGWSendRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _method, const std::string& _path,
rgw_http_param_pair *_params, std::map<std::string, std::string> *_attrs,
S& _input, T *_result, E *_err_result = nullptr)
: RGWSendRawRESTResourceCR<T, E>(_cct, _conn, _http_manager, _method, _path, _params, _attrs, _result, _err_result) {
JSONFormatter jf;
encode_json("data", _input, &jf);
std::stringstream ss;
jf.flush(ss);
//bufferlist bl;
this->input_bl.append(ss.str());
}
};
template <class S, class T, class E = int>
class RGWPostRESTResourceCR : public RGWSendRESTResourceCR<S, T, E> {
public:
RGWPostRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _path,
rgw_http_param_pair *_params, S& _input,
T *_result, E *_err_result = nullptr)
: RGWSendRESTResourceCR<S, T, E>(_cct, _conn, _http_manager,
"POST", _path,
_params, nullptr, _input,
_result, _err_result) {}
};
template <class T, class E = int>
class RGWPutRawRESTResourceCR: public RGWSendRawRESTResourceCR <T, E> {
public:
RGWPutRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _path,
rgw_http_param_pair *_params, bufferlist& _input,
T *_result, E *_err_result = nullptr)
: RGWSendRawRESTResourceCR<T, E>(_cct, _conn, _http_manager, "PUT", _path,
_params, nullptr, _input, _result, true, _err_result) {}
};
template <class T, class E = int>
class RGWPostRawRESTResourceCR: public RGWSendRawRESTResourceCR <T, E> {
public:
RGWPostRawRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _path,
rgw_http_param_pair *_params,
std::map<std::string, std::string> * _attrs,
bufferlist& _input,
T *_result, E *_err_result = nullptr)
: RGWSendRawRESTResourceCR<T, E>(_cct, _conn, _http_manager, "POST", _path,
_params, _attrs, _input, _result, true, _err_result) {}
};
template <class S, class T, class E = int>
class RGWPutRESTResourceCR : public RGWSendRESTResourceCR<S, T, E> {
public:
RGWPutRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _path,
rgw_http_param_pair *_params, S& _input,
T *_result, E *_err_result = nullptr)
: RGWSendRESTResourceCR<S, T, E>(_cct, _conn, _http_manager,
"PUT", _path,
_params, nullptr, _input,
_result, _err_result) {}
RGWPutRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _path,
rgw_http_param_pair *_params,
std::map<std::string, std::string> *_attrs,
S& _input, T *_result, E *_err_result = nullptr)
: RGWSendRESTResourceCR<S, T, E>(_cct, _conn, _http_manager,
"PUT", _path,
_params, _attrs, _input,
_result, _err_result) {}
};
class RGWDeleteRESTResourceCR : public RGWSimpleCoroutine {
RGWRESTConn *conn;
RGWHTTPManager *http_manager;
std::string path;
param_vec_t params;
boost::intrusive_ptr<RGWRESTDeleteResource> http_op;
public:
RGWDeleteRESTResourceCR(CephContext *_cct, RGWRESTConn *_conn,
RGWHTTPManager *_http_manager,
const std::string& _path,
rgw_http_param_pair *_params)
: RGWSimpleCoroutine(_cct), conn(_conn), http_manager(_http_manager),
path(_path), params(make_param_list(_params))
{}
~RGWDeleteRESTResourceCR() override {
request_cleanup();
}
int send_request(const DoutPrefixProvider *dpp) override {
auto op = boost::intrusive_ptr<RGWRESTDeleteResource>(
new RGWRESTDeleteResource(conn, path, params, nullptr, http_manager));
init_new_io(op.get());
bufferlist bl;
int ret = op->aio_send(dpp, bl);
if (ret < 0) {
ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send DELETE request" << dendl;
op->put();
return ret;
}
std::swap(http_op, op); // store reference in http_op on success
return 0;
}
int request_complete() override {
int ret;
bufferlist bl;
ret = http_op->wait(&bl, null_yield);
auto op = std::move(http_op); // release ref on return
if (ret < 0) {
error_stream << "http operation failed: " << op->to_str()
<< " status=" << op->get_http_status() << std::endl;
lsubdout(cct, rgw, 5) << "failed to wait for op, ret=" << ret
<< ": " << op->to_str() << dendl;
op->put();
return ret;
}
op->put();
return 0;
}
void request_cleanup() override {
if (http_op) {
http_op->put();
http_op = NULL;
}
}
};
class RGWCRHTTPGetDataCB : public RGWHTTPStreamRWRequest::ReceiveCB {
ceph::mutex lock = ceph::make_mutex("RGWCRHTTPGetDataCB");
RGWCoroutinesEnv *env;
RGWCoroutine *cr;
RGWHTTPStreamRWRequest *req;
rgw_io_id io_id;
bufferlist data;
bufferlist extra_data;
bool got_all_extra_data{false};
bool paused{false};
bool notified{false};
public:
RGWCRHTTPGetDataCB(RGWCoroutinesEnv *_env, RGWCoroutine *_cr, RGWHTTPStreamRWRequest *_req);
int handle_data(bufferlist& bl, bool *pause) override;
void claim_data(bufferlist *dest, uint64_t max);
bufferlist& get_extra_data() {
return extra_data;
}
bool has_data() {
return (data.length() > 0);
}
bool has_all_extra_data() {
return got_all_extra_data;
}
};
class RGWStreamReadResourceCRF {
protected:
boost::asio::coroutine read_state;
public:
virtual int init(const DoutPrefixProvider *dpp) = 0;
virtual int read(const DoutPrefixProvider *dpp, bufferlist *data, uint64_t max, bool *need_retry) = 0; /* reentrant */
virtual int decode_rest_obj(const DoutPrefixProvider *dpp, std::map<std::string, std::string>& headers, bufferlist& extra_data) = 0;
virtual bool has_attrs() = 0;
virtual void get_attrs(std::map<std::string, std::string> *attrs) = 0;
virtual ~RGWStreamReadResourceCRF() = default;
};
class RGWStreamWriteResourceCRF {
protected:
boost::asio::coroutine write_state;
boost::asio::coroutine drain_state;
public:
virtual int init() = 0;
virtual void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) = 0;
virtual int send() = 0;
virtual int write(bufferlist& data, bool *need_retry) = 0; /* reentrant */
virtual int drain_writes(bool *need_retry) = 0; /* reentrant */
virtual ~RGWStreamWriteResourceCRF() = default;
};
class RGWStreamReadHTTPResourceCRF : public RGWStreamReadResourceCRF {
CephContext *cct;
RGWCoroutinesEnv *env;
RGWCoroutine *caller;
RGWHTTPManager *http_manager;
RGWHTTPStreamRWRequest *req{nullptr};
std::optional<RGWCRHTTPGetDataCB> in_cb;
bufferlist extra_data;
bool got_attrs{false};
bool got_extra_data{false};
rgw_io_id io_read_mask;
protected:
rgw_rest_obj rest_obj;
struct range_info {
bool is_set{false};
uint64_t ofs;
uint64_t size;
} range;
ceph::real_time mtime;
std::string etag;
public:
RGWStreamReadHTTPResourceCRF(CephContext *_cct,
RGWCoroutinesEnv *_env,
RGWCoroutine *_caller,
RGWHTTPManager *_http_manager,
const rgw_obj_key& _src_key) : cct(_cct),
env(_env),
caller(_caller),
http_manager(_http_manager) {
rest_obj.init(_src_key);
}
~RGWStreamReadHTTPResourceCRF();
int init(const DoutPrefixProvider *dpp) override;
int read(const DoutPrefixProvider *dpp, bufferlist *data, uint64_t max, bool *need_retry) override; /* reentrant */
int decode_rest_obj(const DoutPrefixProvider *dpp, std::map<std::string, std::string>& headers, bufferlist& extra_data) override;
bool has_attrs() override;
void get_attrs(std::map<std::string, std::string> *attrs) override;
bool is_done();
virtual bool need_extra_data() { return false; }
void set_req(RGWHTTPStreamRWRequest *r) {
req = r;
}
rgw_rest_obj& get_rest_obj() {
return rest_obj;
}
void set_range(uint64_t ofs, uint64_t size) {
range.is_set = true;
range.ofs = ofs;
range.size = size;
}
};
class RGWStreamWriteHTTPResourceCRF : public RGWStreamWriteResourceCRF {
protected:
RGWCoroutinesEnv *env;
RGWCoroutine *caller;
RGWHTTPManager *http_manager;
using lock_guard = std::lock_guard<std::mutex>;
std::mutex blocked_lock;
bool is_blocked;
RGWHTTPStreamRWRequest *req{nullptr};
struct multipart_info {
bool is_multipart{false};
std::string upload_id;
int part_num{0};
uint64_t part_size;
} multipart;
class WriteDrainNotify : public RGWWriteDrainCB {
RGWStreamWriteHTTPResourceCRF *crf;
public:
explicit WriteDrainNotify(RGWStreamWriteHTTPResourceCRF *_crf) : crf(_crf) {}
void notify(uint64_t pending_size) override;
} write_drain_notify_cb;
public:
RGWStreamWriteHTTPResourceCRF(CephContext *_cct,
RGWCoroutinesEnv *_env,
RGWCoroutine *_caller,
RGWHTTPManager *_http_manager) : env(_env),
caller(_caller),
http_manager(_http_manager),
write_drain_notify_cb(this) {}
virtual ~RGWStreamWriteHTTPResourceCRF();
int init() override {
return 0;
}
void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override;
int send() override;
int write(bufferlist& data, bool *need_retry) override; /* reentrant */
void write_drain_notify(uint64_t pending_size);
int drain_writes(bool *need_retry) override; /* reentrant */
virtual void handle_headers(const std::map<std::string, std::string>& headers) {}
void set_req(RGWHTTPStreamRWRequest *r) {
req = r;
}
void set_multipart(const std::string& upload_id, int part_num, uint64_t part_size) {
multipart.is_multipart = true;
multipart.upload_id = upload_id;
multipart.part_num = part_num;
multipart.part_size = part_size;
}
};
class RGWStreamSpliceCR : public RGWCoroutine {
CephContext *cct;
RGWHTTPManager *http_manager;
std::string url;
std::shared_ptr<RGWStreamReadHTTPResourceCRF> in_crf;
std::shared_ptr<RGWStreamWriteHTTPResourceCRF> out_crf;
bufferlist bl;
bool need_retry{false};
bool sent_attrs{false};
uint64_t total_read{0};
int ret{0};
public:
RGWStreamSpliceCR(CephContext *_cct, RGWHTTPManager *_mgr,
std::shared_ptr<RGWStreamReadHTTPResourceCRF>& _in_crf,
std::shared_ptr<RGWStreamWriteHTTPResourceCRF>& _out_crf);
~RGWStreamSpliceCR();
int operate(const DoutPrefixProvider *dpp) override;
};
| 19,624 | 32.20643 | 134 | h |
null | ceph-main/src/rgw/rgw_crypt.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/**
* Crypto filters for Put/Post/Get operations.
*/
#pragma once
#include <string_view>
#include <rgw/rgw_op.h>
#include <rgw/rgw_rest.h>
#include <rgw/rgw_rest_s3.h>
#include "rgw_putobj.h"
#include "common/async/yield_context.h"
/**
* \brief Interface for block encryption methods
*
* Encrypts and decrypts data.
 * Operations are performed in the context of a larger stream that is divided into blocks.
 * Each block can be processed independently, but only as a whole.
 * A partial block cannot be properly processed.
 * Each request must start at a block-aligned offset.
 * Each request should have a length that is a multiple of the block size.
 * A request with an unaligned length is only acceptable for the last part of the stream.
*/
class BlockCrypt {
public:
BlockCrypt(){};
virtual ~BlockCrypt(){};
/**
   * Determines the size of an encryption block.
   * This is usually a multiple of the key size.
   * It determines the size of chunks that should be passed to \ref encrypt and \ref decrypt.
*/
virtual size_t get_block_size() = 0;
/**
* Encrypts data.
   * Argument \ref stream_offset shows where in the generalized stream the chunk is located.
   * Input for encryption is the \ref input buffer, with relevant data in range <in_ofs, in_ofs+size).
   * \ref input and \ref output may not be the same buffer.
*
* \params
* input - source buffer of data
* in_ofs - offset of chunk inside input
* size - size of chunk, must be chunk-aligned unless last part is processed
* output - destination buffer to encrypt to
* stream_offset - location of <in_ofs,in_ofs+size) chunk in data stream, must be chunk-aligned
* \return true iff successfully encrypted
*/
virtual bool encrypt(bufferlist& input,
off_t in_ofs,
size_t size,
bufferlist& output,
off_t stream_offset,
optional_yield y) = 0;
/**
* Decrypts data.
   * Argument \ref stream_offset shows where in the generalized stream the chunk is located.
   * Input for decryption is the \ref input buffer, with relevant data in range <in_ofs, in_ofs+size).
   * \ref input and \ref output may not be the same buffer.
*
* \params
* input - source buffer of data
* in_ofs - offset of chunk inside input
* size - size of chunk, must be chunk-aligned unless last part is processed
   * output - destination buffer to decrypt to
   * stream_offset - location of <in_ofs,in_ofs+size) chunk in data stream, must be chunk-aligned
   * \return true iff successfully decrypted
*/
virtual bool decrypt(bufferlist& input,
off_t in_ofs,
size_t size,
bufferlist& output,
off_t stream_offset,
optional_yield y) = 0;
};
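/* Illustrative sketch of feeding block-aligned chunks to a BlockCrypt; names
 * such as `crypt`, `total`, `input`, `output` and `y` are hypothetical:
 *
 *   const size_t bs = crypt->get_block_size();
 *   for (off_t ofs = 0; ofs < total; ofs += bs) {
 *     size_t len = std::min<size_t>(bs, total - ofs); // only the last chunk may be short
 *     crypt->encrypt(input, ofs, len, output, ofs, y);
 *   }
 */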
static const size_t AES_256_KEYSIZE = 256 / 8;
bool AES_256_ECB_encrypt(const DoutPrefixProvider* dpp,
CephContext* cct,
const uint8_t* key,
size_t key_size,
const uint8_t* data_in,
uint8_t* data_out,
size_t data_size);
class RGWGetObj_BlockDecrypt : public RGWGetObj_Filter {
const DoutPrefixProvider *dpp;
CephContext* cct;
std::unique_ptr<BlockCrypt> crypt; /**< already configured stateless BlockCrypt
for operations when enough data is accumulated */
off_t enc_begin_skip; /**< amount of data to skip from beginning of received data */
off_t ofs; /**< stream offset of data we expect to show up next through \ref handle_data */
off_t end; /**< stream offset of last byte that is requested */
bufferlist cache; /**< stores extra data that could not (yet) be processed by BlockCrypt */
size_t block_size; /**< snapshot of \ref BlockCrypt.get_block_size() */
optional_yield y;
int process(bufferlist& cipher, size_t part_ofs, size_t size);
protected:
std::vector<size_t> parts_len; /**< size of parts of multipart object, parsed from manifest */
public:
RGWGetObj_BlockDecrypt(const DoutPrefixProvider *dpp,
CephContext* cct,
RGWGetObj_Filter* next,
std::unique_ptr<BlockCrypt> crypt,
optional_yield y);
virtual ~RGWGetObj_BlockDecrypt();
virtual int fixup_range(off_t& bl_ofs,
off_t& bl_end) override;
virtual int handle_data(bufferlist& bl,
off_t bl_ofs,
off_t bl_len) override;
virtual int flush() override;
int read_manifest(const DoutPrefixProvider *dpp, bufferlist& manifest_bl);
}; /* RGWGetObj_BlockDecrypt */
class RGWPutObj_BlockEncrypt : public rgw::putobj::Pipe
{
const DoutPrefixProvider *dpp;
CephContext* cct;
std::unique_ptr<BlockCrypt> crypt; /**< already configured stateless BlockCrypt
for operations when enough data is accumulated */
bufferlist cache; /**< stores extra data that could not (yet) be processed by BlockCrypt */
const size_t block_size; /**< snapshot of \ref BlockCrypt.get_block_size() */
optional_yield y;
public:
RGWPutObj_BlockEncrypt(const DoutPrefixProvider *dpp,
CephContext* cct,
rgw::sal::DataProcessor *next,
std::unique_ptr<BlockCrypt> crypt,
optional_yield y);
int process(bufferlist&& data, uint64_t logical_offset) override;
}; /* RGWPutObj_BlockEncrypt */
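/*
 * Illustrative sketch: placing the encrypting pipe in front of an existing
 * data processor during a PutObj. `dpp`, `cct`, `next`, `block_crypt`, `y`,
 * `data` and `logical_offset` are placeholders provided by the request
 * machinery.
 *
 *   RGWPutObj_BlockEncrypt encrypt(dpp, cct, next, std::move(block_crypt), y);
 *   int r = encrypt.process(std::move(data), logical_offset);
 */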
int rgw_s3_prepare_encrypt(req_state* s,
std::map<std::string, ceph::bufferlist>& attrs,
std::unique_ptr<BlockCrypt>* block_crypt,
std::map<std::string,
std::string>& crypt_http_responses);
int rgw_s3_prepare_decrypt(req_state* s,
std::map<std::string, ceph::bufferlist>& attrs,
std::unique_ptr<BlockCrypt>* block_crypt,
std::map<std::string,
std::string>& crypt_http_responses);
static inline void set_attr(std::map<std::string, bufferlist>& attrs,
const char* key,
std::string_view value)
{
bufferlist bl;
bl.append(value.data(), value.size());
attrs[key] = std::move(bl);
}
static inline std::string get_str_attribute(std::map<std::string, bufferlist>& attrs,
const char *name)
{
auto iter = attrs.find(name);
if (iter == attrs.end()) {
return {};
}
return iter->second.to_str();
}
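/*
 * Illustrative sketch: the two helpers above round-trip a small string
 * attribute through an xattr map; the attribute name is a placeholder.
 *
 *   std::map<std::string, bufferlist> attrs;
 *   set_attr(attrs, "x-example-crypt-mode", "SSE-C");
 *   std::string mode = get_str_attribute(attrs, "x-example-crypt-mode");  // "SSE-C"
 */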
int rgw_remove_sse_s3_bucket_key(req_state *s);
| 6,873 | 37.188889 | 98 | h |
null | ceph-main/src/rgw/rgw_crypt_sanitize.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string_view>
#include "rgw_common.h"
namespace rgw {
namespace crypt_sanitize {
/*
 * Temporary container that suppresses printing when the variable contains a secret key.
*/
struct env {
std::string_view name;
std::string_view value;
env(std::string_view name, std::string_view value)
: name(name), value(value) {}
};
/*
 * Temporary container that suppresses printing when AWS meta attributes contain a secret key.
*/
struct x_meta_map {
std::string_view name;
std::string_view value;
x_meta_map(std::string_view name, std::string_view value)
: name(name), value(value) {}
};
/*
 * Temporary container that suppresses printing when the s3_policy calculation variable contains a secret key.
*/
struct s3_policy {
std::string_view name;
std::string_view value;
s3_policy(std::string_view name, std::string_view value)
: name(name), value(value) {}
};
/*
 * Temporary container that suppresses printing when the auth string contains a secret key.
*/
struct auth {
const req_state* const s;
std::string_view value;
auth(const req_state* const s, std::string_view value)
: s(s), value(value) {}
};
/*
 * Temporary container that suppresses printing when a log line produced by civetweb may contain a secret key.
*/
struct log_content {
const std::string_view buf;
explicit log_content(const std::string_view buf)
: buf(buf) {}
};
std::ostream& operator<<(std::ostream& out, const env& e);
std::ostream& operator<<(std::ostream& out, const x_meta_map& x);
std::ostream& operator<<(std::ostream& out, const s3_policy& x);
std::ostream& operator<<(std::ostream& out, const auth& x);
std::ostream& operator<<(std::ostream& out, const log_content& x);
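/*
 * Illustrative sketch: values are wrapped at the logging call site so the
 * stream operators above can redact secrets. `cct`, the variable name and
 * `value` are placeholders.
 *
 *   ldout(cct, 20) << "ENV: "
 *                  << env("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY", value)
 *                  << dendl;
 */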
}
}
| 1,789 | 24.942029 | 102 | h |
null | ceph-main/src/rgw/rgw_dmclock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
* Copyright (C) 2019 SUSE LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "dmclock/src/dmclock_server.h"
namespace rgw::dmclock {
// TODO: implement read vs write
enum class client_id {
admin, //< /admin apis
auth, //< swift auth, sts
data, //< PutObj, GetObj
metadata, //< bucket operations, object metadata
count
};
// TODO move these to dmclock/types or so in submodule
using crimson::dmclock::Cost;
using crimson::dmclock::ClientInfo;
enum class scheduler_t {
none,
throttler,
dmclock
};
inline scheduler_t get_scheduler_t(CephContext* const cct)
{
const auto scheduler_type = cct->_conf.get_val<std::string>("rgw_scheduler_type");
if (scheduler_type == "dmclock")
return scheduler_t::dmclock;
else if (scheduler_type == "throttler")
return scheduler_t::throttler;
else
return scheduler_t::none;
}
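/*
 * Illustrative sketch: choosing a scheduler implementation from the
 * configured type. `cct` is a placeholder CephContext.
 *
 *   const auto type = get_scheduler_t(cct);
 *   const bool use_dmclock = (type == scheduler_t::dmclock);
 *   const bool use_throttler = (type == scheduler_t::throttler);
 */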
} // namespace rgw::dmclock
| 1,430 | 26 | 84 | h |
null | ceph-main/src/rgw/rgw_dmclock_async_scheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/async/completion.h"
#include <boost/asio.hpp>
#include "rgw_dmclock_scheduler.h"
#include "rgw_dmclock_scheduler_ctx.h"
namespace rgw::dmclock {
namespace async = ceph::async;
/*
* A dmclock request scheduling service for use with boost::asio.
*
* An asynchronous dmclock priority queue, where scheduled requests complete
* on a boost::asio executor.
*/
class AsyncScheduler : public md_config_obs_t, public Scheduler {
public:
template <typename ...Args> // args forwarded to PullPriorityQueue ctor
AsyncScheduler(CephContext *cct, boost::asio::io_context& context,
GetClientCounters&& counters, md_config_obs_t *observer,
Args&& ...args);
~AsyncScheduler();
using executor_type = boost::asio::io_context::executor_type;
/// return the default executor for async_request() callbacks
executor_type get_executor() noexcept {
return timer.get_executor();
}
/// submit an async request for dmclock scheduling. the given completion
/// handler will be invoked with (error_code, PhaseType) when the request
/// is ready or canceled. on success, this grants a throttle unit that must
/// be returned with a call to request_complete()
template <typename CompletionToken>
auto async_request(const client_id& client, const ReqParams& params,
const Time& time, Cost cost, CompletionToken&& token);
/// returns a throttle unit granted by async_request()
void request_complete() override;
/// cancel all queued requests, invoking their completion handlers with an
/// operation_aborted error and default-constructed result
void cancel();
/// cancel all queued requests for a given client, invoking their completion
/// handler with an operation_aborted error and default-constructed result
void cancel(const client_id& client);
const char** get_tracked_conf_keys() const override;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string>& changed) override;
private:
int schedule_request_impl(const client_id& client, const ReqParams& params,
const Time& time, const Cost& cost,
optional_yield yield_ctx) override;
static constexpr bool IsDelayed = false;
using Queue = crimson::dmclock::PullPriorityQueue<client_id, Request, IsDelayed>;
using RequestRef = typename Queue::RequestRef;
Queue queue; //< dmclock priority queue
using Signature = void(boost::system::error_code, PhaseType);
using Completion = async::Completion<Signature, async::AsBase<Request>>;
using Clock = ceph::coarse_real_clock;
using Timer = boost::asio::basic_waitable_timer<Clock,
boost::asio::wait_traits<Clock>, executor_type>;
Timer timer; //< timer for the next scheduled request
CephContext *const cct;
md_config_obs_t *const observer; //< observer to update ClientInfoFunc
GetClientCounters counters; //< provides per-client perf counters
/// max request throttle
std::atomic<int64_t> max_requests;
std::atomic<int64_t> outstanding_requests = 0;
/// set a timer to process the next request
void schedule(const Time& time);
/// process ready requests, then schedule the next pending request
void process(const Time& now);
};
template <typename ...Args>
AsyncScheduler::AsyncScheduler(CephContext *cct, boost::asio::io_context& context,
GetClientCounters&& counters,
md_config_obs_t *observer, Args&& ...args)
: queue(std::forward<Args>(args)...),
timer(context), cct(cct), observer(observer),
counters(std::move(counters)),
max_requests(cct->_conf.get_val<int64_t>("rgw_max_concurrent_requests"))
{
if (max_requests <= 0) {
max_requests = std::numeric_limits<int64_t>::max();
}
if (observer) {
cct->_conf.add_observer(this);
}
}
template <typename CompletionToken>
auto AsyncScheduler::async_request(const client_id& client,
const ReqParams& params,
const Time& time, Cost cost,
CompletionToken&& token)
{
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto ex1 = get_executor();
auto& handler = init.completion_handler;
// allocate the Request and add it to the queue
auto completion = Completion::create(ex1, std::move(handler),
Request{client, time, cost});
// cast to unique_ptr<Request>
auto req = RequestRef{std::move(completion)};
int r = queue.add_request(std::move(req), client, params, time, cost);
if (r == 0) {
// schedule an immediate call to process() on the executor
schedule(crimson::dmclock::TimeZero);
if (auto c = counters(client)) {
c->inc(queue_counters::l_qlen);
c->inc(queue_counters::l_cost, cost);
}
} else {
// post the error code
boost::system::error_code ec(r, boost::system::system_category());
// cast back to Completion
auto completion = static_cast<Completion*>(req.release());
async::post(std::unique_ptr<Completion>{completion},
ec, PhaseType::priority);
if (auto c = counters(client)) {
c->inc(queue_counters::l_limit);
c->inc(queue_counters::l_limit_cost, cost);
}
}
return init.result.get();
}
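/*
 * Illustrative sketch: submitting a request with a callback completion token.
 * `sched`, `client` and `params` are placeholders; the handler runs on the
 * scheduler's executor and must be paired with request_complete() once the
 * granted request has finished.
 *
 *   sched.async_request(client, params, get_time(), 1,
 *       [&sched](boost::system::error_code ec, PhaseType phase) {
 *         if (!ec) {
 *           // admitted; process the request, then:
 *           sched.request_complete();
 *         }
 *       });
 */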
class SimpleThrottler : public md_config_obs_t, public dmclock::Scheduler {
public:
SimpleThrottler(CephContext *cct) :
max_requests(cct->_conf.get_val<int64_t>("rgw_max_concurrent_requests")),
counters(cct, "simple-throttler")
{
if (max_requests <= 0) {
max_requests = std::numeric_limits<int64_t>::max();
}
cct->_conf.add_observer(this);
}
const char** get_tracked_conf_keys() const override {
static const char* keys[] = { "rgw_max_concurrent_requests", nullptr };
return keys;
}
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string>& changed) override
{
if (changed.count("rgw_max_concurrent_requests")) {
auto new_max = conf.get_val<int64_t>("rgw_max_concurrent_requests");
max_requests = new_max > 0 ? new_max : std::numeric_limits<int64_t>::max();
}
}
void request_complete() override {
--outstanding_requests;
if (auto c = counters();
c != nullptr) {
c->inc(throttle_counters::l_outstanding, -1);
}
}
private:
int schedule_request_impl(const client_id&, const ReqParams&,
const Time&, const Cost&,
optional_yield) override {
if (outstanding_requests++ >= max_requests) {
if (auto c = counters();
c != nullptr) {
c->inc(throttle_counters::l_outstanding);
c->inc(throttle_counters::l_throttle);
}
return -EAGAIN;
}
    return 0;
}
std::atomic<int64_t> max_requests;
std::atomic<int64_t> outstanding_requests = 0;
ThrottleCounters counters;
};
} // namespace rgw::dmclock
| 7,424 | 33.059633 | 83 | h |
null | ceph-main/src/rgw/rgw_dmclock_scheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
* (C) 2019 SUSE LLC
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/ceph_time.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/async/yield_context.h"
#include "rgw_dmclock.h"
namespace rgw::dmclock {
using crimson::dmclock::ReqParams;
using crimson::dmclock::PhaseType;
using crimson::dmclock::AtLimit;
using crimson::dmclock::Time;
using crimson::dmclock::get_time;
/// function to provide client counters
using GetClientCounters = std::function<PerfCounters*(client_id)>;
struct Request {
client_id client;
Time started;
Cost cost;
};
enum class ReqState {
Wait,
Ready,
Cancelled
};
template <typename F>
class Completer {
public:
Completer(F &&f): f(std::move(f)) {}
// Default constructor is needed as we need to create an empty completer
  // that'll be move-assigned later when the request is processed
Completer() = default;
~Completer() {
if (f) {
f();
}
}
Completer(const Completer&) = delete;
Completer& operator=(const Completer&) = delete;
Completer(Completer&& other) = default;
Completer& operator=(Completer&& other) = default;
private:
F f;
};
using SchedulerCompleter = Completer<std::function<void()>>;
class Scheduler {
public:
auto schedule_request(const client_id& client, const ReqParams& params,
const Time& time, const Cost& cost,
optional_yield yield)
{
    int r = schedule_request_impl(client, params, time, cost, yield);
    return std::make_pair(r, SchedulerCompleter(std::bind(&Scheduler::request_complete, this)));
}
virtual void request_complete() {};
virtual ~Scheduler() {};
private:
virtual int schedule_request_impl(const client_id&, const ReqParams&,
const Time&, const Cost&,
optional_yield) = 0;
};
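/*
 * Illustrative sketch: the completer returned by schedule_request() calls
 * request_complete() when it goes out of scope, so callers only need to
 * check the return code. `scheduler`, `client`, `params` and `y` are
 * placeholders.
 *
 *   auto [ret, completer] = scheduler->schedule_request(
 *       client, params, get_time(), 1, y);
 *   if (ret < 0) {
 *     return ret;  // e.g. -EAGAIN when the throttler is saturated
 *   }
 *   // handle the request; completion is signalled when `completer` destructs
 */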
} // namespace rgw::dmclock
| 2,173 | 23.988506 | 94 | h |
null | ceph-main/src/rgw/rgw_dmclock_scheduler_ctx.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "common/perf_counters.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "rgw_dmclock.h"
namespace queue_counters {
enum {
l_first = 427150,
l_qlen,
l_cost,
l_res,
l_res_cost,
l_prio,
l_prio_cost,
l_limit,
l_limit_cost,
l_cancel,
l_cancel_cost,
l_res_latency,
l_prio_latency,
l_last,
};
PerfCountersRef build(CephContext *cct, const std::string& name);
} // namespace queue_counters
namespace throttle_counters {
enum {
l_first = 437219,
l_throttle,
l_outstanding,
l_last
};
PerfCountersRef build(CephContext *cct, const std::string& name);
} // namespace throttle
namespace rgw::dmclock {
// the last client counter would be for global scheduler stats
static constexpr auto counter_size = static_cast<size_t>(client_id::count) + 1;
/// array of per-client counters to serve as GetClientCounters
class ClientCounters {
std::array<PerfCountersRef, counter_size> clients;
public:
ClientCounters(CephContext *cct);
PerfCounters* operator()(client_id client) const {
return clients[static_cast<size_t>(client)].get();
}
};
class ThrottleCounters {
PerfCountersRef counters;
public:
  ThrottleCounters(CephContext* const cct, const std::string& name):
counters(throttle_counters::build(cct, name)) {}
PerfCounters* operator()() const {
return counters.get();
}
};
struct ClientSum {
uint64_t count{0};
Cost cost{0};
};
constexpr auto client_count = static_cast<size_t>(client_id::count);
using ClientSums = std::array<ClientSum, client_count>;
void inc(ClientSums& sums, client_id client, Cost cost);
void on_cancel(PerfCounters *c, const ClientSum& sum);
void on_process(PerfCounters* c, const ClientSum& rsum, const ClientSum& psum);
class ClientConfig : public md_config_obs_t {
std::vector<ClientInfo> clients;
void update(const ConfigProxy &conf);
public:
ClientConfig(CephContext *cct);
ClientInfo* operator()(client_id client);
const char** get_tracked_conf_keys() const override;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string>& changed) override;
};
class SchedulerCtx {
public:
SchedulerCtx(CephContext* const cct) : sched_t(get_scheduler_t(cct))
{
if(sched_t == scheduler_t::dmclock) {
dmc_client_config = std::make_shared<ClientConfig>(cct);
// we don't have a move only cref std::function yet
dmc_client_counters = std::make_optional<ClientCounters>(cct);
}
}
// We need to construct a std::function from a NonCopyable object
ClientCounters& get_dmc_client_counters() { return dmc_client_counters.value(); }
ClientConfig* const get_dmc_client_config() const { return dmc_client_config.get(); }
private:
scheduler_t sched_t;
std::shared_ptr<ClientConfig> dmc_client_config {nullptr};
std::optional<ClientCounters> dmc_client_counters {std::nullopt};
};
} // namespace rgw::dmclock
| 3,159 | 25.333333 | 87 | h |
null | ceph-main/src/rgw/rgw_dmclock_sync_scheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
 * Copyright (C) 2018 SUSE Linux GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_dmclock_scheduler.h"
#include "rgw_dmclock_scheduler_ctx.h"
namespace rgw::dmclock {
// For a blocking SyncRequest we hold a reference to a cv and the caller must
// ensure the lifetime
struct SyncRequest : public Request {
std::mutex& req_mtx;
std::condition_variable& req_cv;
ReqState& req_state;
GetClientCounters& counters;
explicit SyncRequest(client_id _id, Time started, Cost cost,
std::mutex& mtx, std::condition_variable& _cv,
ReqState& _state, GetClientCounters& counters):
Request{_id, started, cost}, req_mtx(mtx), req_cv(_cv), req_state(_state), counters(counters) {};
};
class SyncScheduler: public Scheduler {
public:
template <typename ...Args>
SyncScheduler(CephContext *cct, GetClientCounters&& counters,
Args&& ...args);
~SyncScheduler();
  // submit a blocking request for dmclock scheduling; this function waits
  // until the request is ready.
int add_request(const client_id& client, const ReqParams& params,
const Time& time, Cost cost);
void cancel();
void cancel(const client_id& client);
static void handle_request_cb(const client_id& c, std::unique_ptr<SyncRequest> req,
PhaseType phase, Cost cost);
private:
int schedule_request_impl(const client_id& client, const ReqParams& params,
const Time& time, const Cost& cost,
optional_yield _y [[maybe_unused]]) override
{
return add_request(client, params, time, cost);
}
static constexpr bool IsDelayed = false;
using Queue = crimson::dmclock::PushPriorityQueue<client_id, SyncRequest, IsDelayed>;
using RequestRef = typename Queue::RequestRef;
using Clock = ceph::coarse_real_clock;
Queue queue;
CephContext const *cct;
GetClientCounters counters; //< provides per-client perf counters
};
template <typename ...Args>
SyncScheduler::SyncScheduler(CephContext *cct, GetClientCounters&& counters,
Args&& ...args):
queue(std::forward<Args>(args)...), cct(cct), counters(std::move(counters))
{}
} // namespace rgw::dmclock
| 2,478 | 30.782051 | 101 | h |
null | ceph-main/src/rgw/rgw_es_query.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_string.h"
class ESQueryStack {
std::list<std::string> l;
std::list<std::string>::iterator iter;
public:
explicit ESQueryStack(std::list<std::string>& src) {
assign(src);
}
ESQueryStack() {}
void assign(std::list<std::string>& src) {
l.swap(src);
iter = l.begin();
}
bool peek(std::string *dest) {
if (done()) {
return false;
}
*dest = *iter;
return true;
}
bool pop(std::string *dest) {
bool valid = peek(dest);
if (!valid) {
return false;
}
++iter;
return true;
}
bool done() {
return (iter == l.end());
}
};
class ESInfixQueryParser {
std::string query;
int size;
const char *str;
int pos{0};
std::list<std::string> args;
void skip_whitespace(const char *str, int size, int& pos);
bool get_next_token(bool (*filter)(char));
bool parse_condition();
bool parse_and_or();
bool parse_specific_char(const char *pchar);
bool parse_open_bracket();
bool parse_close_bracket();
public:
explicit ESInfixQueryParser(const std::string& _query) : query(_query), size(query.size()), str(query.c_str()) {}
bool parse(std::list<std::string> *result);
};
class ESQueryNode;
struct ESEntityTypeMap {
enum EntityType {
ES_ENTITY_NONE = 0,
ES_ENTITY_STR = 1,
ES_ENTITY_INT = 2,
ES_ENTITY_DATE = 3,
};
std::map<std::string, EntityType> m;
explicit ESEntityTypeMap(std::map<std::string, EntityType>& _m) : m(_m) {}
bool find(const std::string& entity, EntityType *ptype) {
auto i = m.find(entity);
if (i != m.end()) {
*ptype = i->second;
return true;
}
*ptype = ES_ENTITY_NONE;
return false;
}
};
class ESQueryCompiler {
ESInfixQueryParser parser;
ESQueryStack stack;
ESQueryNode *query_root{nullptr};
std::string custom_prefix;
bool convert(std::list<std::string>& infix, std::string *perr);
std::list<std::pair<std::string, std::string> > eq_conds;
ESEntityTypeMap *generic_type_map{nullptr};
ESEntityTypeMap *custom_type_map{nullptr};
std::map<std::string, std::string, ltstr_nocase> *field_aliases = nullptr;
std::set<std::string> *restricted_fields = nullptr;
public:
ESQueryCompiler(const std::string& query,
std::list<std::pair<std::string, std::string> > *prepend_eq_conds,
const std::string& _custom_prefix)
: parser(query), custom_prefix(_custom_prefix) {
if (prepend_eq_conds) {
eq_conds = std::move(*prepend_eq_conds);
}
}
~ESQueryCompiler();
bool compile(std::string *perr);
void dump(Formatter *f) const;
void set_generic_type_map(ESEntityTypeMap *entity_map) {
generic_type_map = entity_map;
}
ESEntityTypeMap *get_generic_type_map() {
return generic_type_map;
}
const std::string& get_custom_prefix() { return custom_prefix; }
void set_custom_type_map(ESEntityTypeMap *entity_map) {
custom_type_map = entity_map;
}
ESEntityTypeMap *get_custom_type_map() {
return custom_type_map;
}
void set_field_aliases(std::map<std::string, std::string, ltstr_nocase> *fa) {
field_aliases = fa;
}
std::string unalias_field(const std::string& field) {
if (!field_aliases) {
return field;
}
auto i = field_aliases->find(field);
if (i == field_aliases->end()) {
return field;
}
return i->second;
}
void set_restricted_fields(std::set<std::string> *rf) {
restricted_fields = rf;
}
bool is_restricted(const std::string& f) {
return (restricted_fields && restricted_fields->find(f) != restricted_fields->end());
}
};
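/*
 * Illustrative sketch: compiling a user query into an elasticsearch filter.
 * The prepend conditions, prefix and type maps are placeholders.
 *
 *   std::list<std::pair<std::string, std::string>> prepend = {
 *     {"bucket", bucket_name},
 *   };
 *   ESQueryCompiler compiler(query_str, &prepend, "x-example-meta-");
 *   compiler.set_generic_type_map(&generic_map);
 *   compiler.set_custom_type_map(&custom_map);
 *   std::string err;
 *   if (!compiler.compile(&err)) {
 *     return -EINVAL;  // err holds the parse error
 *   }
 *   JSONFormatter f;
 *   compiler.dump(&f);  // emits the query body
 */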
| 3,705 | 21.460606 | 115 | h |
null | ceph-main/src/rgw/rgw_flight.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright 2023 IBM
*
* See file COPYING for licensing information.
*/
#pragma once
#include <map>
#include <mutex>
#include <atomic>
#include "include/common_fwd.h"
#include "common/ceph_context.h"
#include "common/Thread.h"
#include "common/ceph_time.h"
#include "rgw_frontend.h"
#include "arrow/type.h"
#include "arrow/flight/server.h"
#include "arrow/util/string_view.h"
#include "rgw_flight_frontend.h"
#define INFO_F(dp) ldpp_dout(&dp, 20) << "INFO: " << __func__ << ": "
#define STATUS_F(dp) ldpp_dout(&dp, 10) << "STATUS: " << __func__ << ": "
#define WARN_F(dp) ldpp_dout(&dp, 0) << "WARNING: " << __func__ << ": "
#define ERROR_F(dp) ldpp_dout(&dp, 0) << "ERROR: " << __func__ << ": "
#define INFO INFO_F(dp)
#define STATUS STATUS_F(dp)
#define WARN WARN_F(dp)
#define ERROR ERROR_F(dp)
namespace arw = arrow;
namespace flt = arrow::flight;
struct req_state;
namespace rgw::flight {
static const coarse_real_clock::duration lifespan = std::chrono::hours(1);
struct FlightData {
FlightKey key;
// coarse_real_clock::time_point expires;
std::string uri;
std::string tenant_name;
std::string bucket_name;
rgw_obj_key object_key;
// NB: what about object's namespace and instance?
uint64_t num_records;
uint64_t obj_size;
std::shared_ptr<arw::Schema> schema;
std::shared_ptr<const arw::KeyValueMetadata> kv_metadata;
rgw_user user_id; // TODO: this should be removed when we do
// proper flight authentication
FlightData(const std::string& _uri,
const std::string& _tenant_name,
const std::string& _bucket_name,
const rgw_obj_key& _object_key,
uint64_t _num_records,
uint64_t _obj_size,
std::shared_ptr<arw::Schema>& _schema,
std::shared_ptr<const arw::KeyValueMetadata>& _kv_metadata,
rgw_user _user_id);
};
// stores flights that have been created and helps expire them
class FlightStore {
protected:
const DoutPrefix& dp;
public:
FlightStore(const DoutPrefix& dp);
virtual ~FlightStore();
virtual FlightKey add_flight(FlightData&& flight) = 0;
// TODO consider returning const shared pointers to FlightData in
// the following two functions
virtual arw::Result<FlightData> get_flight(const FlightKey& key) const = 0;
virtual std::optional<FlightData> after_key(const FlightKey& key) const = 0;
virtual int remove_flight(const FlightKey& key) = 0;
virtual int expire_flights() = 0;
};
class MemoryFlightStore : public FlightStore {
std::map<FlightKey, FlightData> map;
mutable std::mutex mtx; // for map
public:
MemoryFlightStore(const DoutPrefix& dp);
virtual ~MemoryFlightStore();
FlightKey add_flight(FlightData&& flight) override;
arw::Result<FlightData> get_flight(const FlightKey& key) const override;
std::optional<FlightData> after_key(const FlightKey& key) const override;
int remove_flight(const FlightKey& key) override;
int expire_flights() override;
};
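/*
 * Illustrative sketch: registering a flight and looking it back up by key.
 * Every constructor argument is a placeholder normally filled in by the
 * GetObj filter.
 *
 *   MemoryFlightStore store(dp);
 *   FlightKey key = store.add_flight(FlightData(uri, tenant, bucket, obj_key,
 *                                               num_records, obj_size,
 *                                               schema, kv_metadata, user_id));
 *   auto found = store.get_flight(key);
 *   if (!found.ok()) {
 *     return found.status();
 *   }
 */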
class FlightServer : public flt::FlightServerBase {
using Data1 = std::vector<std::shared_ptr<arw::RecordBatch>>;
RGWProcessEnv& env;
rgw::sal::Driver* driver;
const DoutPrefix& dp;
FlightStore* flight_store;
std::map<std::string, Data1> data;
public:
static constexpr int default_port = 8077;
FlightServer(RGWProcessEnv& env,
FlightStore* flight_store,
const DoutPrefix& dp);
~FlightServer() override;
FlightStore* get_flight_store() {
return flight_store;
}
arw::Status ListFlights(const flt::ServerCallContext& context,
const flt::Criteria* criteria,
std::unique_ptr<flt::FlightListing>* listings) override;
arw::Status GetFlightInfo(const flt::ServerCallContext &context,
const flt::FlightDescriptor &request,
std::unique_ptr<flt::FlightInfo> *info) override;
arw::Status GetSchema(const flt::ServerCallContext &context,
const flt::FlightDescriptor &request,
std::unique_ptr<flt::SchemaResult> *schema) override;
arw::Status DoGet(const flt::ServerCallContext &context,
const flt::Ticket &request,
std::unique_ptr<flt::FlightDataStream> *stream) override;
}; // class FlightServer
class OwningStringView : public arw::util::string_view {
uint8_t* buffer;
int64_t capacity;
int64_t consumed;
OwningStringView(uint8_t* _buffer, int64_t _size) :
arw::util::string_view((const char*) _buffer, _size),
buffer(_buffer),
capacity(_size),
consumed(_size)
{ }
OwningStringView(OwningStringView&& from, int64_t new_size) :
buffer(nullptr),
capacity(from.capacity),
consumed(new_size)
{
// should be impossible due to static function check
ceph_assertf(consumed <= capacity, "new size cannot exceed capacity");
std::swap(buffer, from.buffer);
from.capacity = 0;
from.consumed = 0;
}
public:
OwningStringView(OwningStringView&&) = default;
OwningStringView& operator=(OwningStringView&&) = default;
uint8_t* writeable_data() {
return buffer;
}
~OwningStringView() {
if (buffer) {
delete[] buffer;
}
}
static arw::Result<OwningStringView> make(int64_t size) {
uint8_t* buffer = new uint8_t[size];
if (!buffer) {
      return arw::Status::OutOfMemory("could not allocate buffer of size %" PRId64, size);
}
return OwningStringView(buffer, size);
}
static arw::Result<OwningStringView> shrink(OwningStringView&& from,
int64_t new_size) {
if (new_size > from.capacity) {
return arw::Status::Invalid("new size cannot exceed capacity");
} else {
return OwningStringView(std::move(from), new_size);
}
}
};
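/*
 * Illustrative sketch: allocate a buffer, fill part of it, then shrink the
 * view to the bytes actually produced. `fill_buffer` is a placeholder.
 *
 *   auto r = OwningStringView::make(4096);
 *   if (!r.ok()) {
 *     return r.status();
 *   }
 *   OwningStringView view = std::move(r).ValueOrDie();
 *   const int64_t used = fill_buffer(view.writeable_data(), 4096);
 *   return OwningStringView::shrink(std::move(view), used);
 */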
// GLOBAL
flt::Ticket FlightKeyToTicket(const FlightKey& key);
arw::Status TicketToFlightKey(const flt::Ticket& t, FlightKey& key);
} // namespace rgw::flight
| 5,924 | 25.689189 | 91 | h |
null | ceph-main/src/rgw/rgw_flight_frontend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright 2023 IBM
*
* See file COPYING for licensing information.
*/
#pragma once
#include "include/common_fwd.h"
#include "common/Thread.h"
#include "rgw_frontend.h"
#include "rgw_op.h"
#include "arrow/status.h"
namespace rgw::flight {
using FlightKey = uint32_t;
extern const FlightKey null_flight_key;
class FlightServer;
class FlightFrontend : public RGWFrontend {
static constexpr std::string_view server_thread_name =
"Arrow Flight Server thread";
RGWProcessEnv& env;
std::thread flight_thread;
RGWFrontendConfig* config;
int port;
const DoutPrefix dp;
public:
// port <= 0 means let server decide; typically 8077
FlightFrontend(RGWProcessEnv& env,
RGWFrontendConfig* config,
int port = -1);
~FlightFrontend() override;
int init() override;
int run() override;
void stop() override;
void join() override;
void pause_for_new_config() override;
void unpause_with_new_config() override;
}; // class FlightFrontend
class FlightGetObj_Filter : public RGWGetObj_Filter {
const RGWProcessEnv& penv;
const DoutPrefix dp;
FlightKey key;
uint64_t current_offset;
uint64_t expected_size;
std::string uri;
std::string tenant_name;
std::string bucket_name;
rgw_obj_key object_key;
std::string temp_file_name;
std::ofstream temp_file;
arrow::Status schema_status;
rgw_user user_id; // TODO: this should be removed when we do
// proper flight authentication
public:
FlightGetObj_Filter(const req_state* request, RGWGetObj_Filter* next);
~FlightGetObj_Filter();
int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override;
#if 0
// this would allow the range to be modified if necessary;
int fixup_range(off_t& ofs, off_t& end) override;
#endif
};
} // namespace rgw::flight
| 1,934 | 21.241379 | 72 | h |
null | ceph-main/src/rgw/rgw_formats.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "common/Formatter.h"
#include <list>
#include <stdint.h>
#include <string>
#include <ostream>
struct plain_stack_entry {
int size;
bool is_array;
};
/* FIXME: this class is mis-named.
* FIXME: This was a hack to send certain swift messages.
* There is a much better way to do this.
*/
class RGWFormatter_Plain : public Formatter {
void reset_buf();
public:
explicit RGWFormatter_Plain(bool use_kv = false);
~RGWFormatter_Plain() override;
void set_status(int status, const char* status_name) override {};
void output_header() override {};
void output_footer() override {};
void enable_line_break() override {};
void flush(std::ostream& os) override;
void reset() override;
void open_array_section(std::string_view name) override;
void open_array_section_in_ns(std::string_view name, const char *ns) override;
void open_object_section(std::string_view name) override;
void open_object_section_in_ns(std::string_view name, const char *ns) override;
void close_section() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t u) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
std::ostream& dump_stream(std::string_view name) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
int get_len() const override;
void write_raw_data(const char *data) override;
private:
void write_data(const char *fmt, ...);
void dump_value_int(std::string_view name, const char *fmt, ...);
char *buf = nullptr;
int len = 0;
int max_len = 0;
std::list<struct plain_stack_entry> stack;
size_t min_stack_level = 0;
bool use_kv;
  bool wrote_something = false;
};
/* This is a presentation layer. No logic inside, please. */
class RGWSwiftWebsiteListingFormatter {
std::ostream& ss;
const std::string prefix;
protected:
std::string format_name(const std::string& item_name) const;
public:
RGWSwiftWebsiteListingFormatter(std::ostream& ss,
std::string prefix)
: ss(ss),
prefix(std::move(prefix)) {
}
  /* The supplied css_path can be empty. In that case a default,
* embedded style sheet will be generated. */
void generate_header(const std::string& dir_path,
const std::string& css_path);
void generate_footer();
void dump_object(const rgw_bucket_dir_entry& objent);
void dump_subdir(const std::string& name);
};
class RGWFormatterFlusher {
protected:
Formatter *formatter;
bool flushed;
bool started;
virtual void do_flush() = 0;
virtual void do_start(int ret) {}
void set_formatter(Formatter *f) {
formatter = f;
}
public:
explicit RGWFormatterFlusher(Formatter *f) : formatter(f), flushed(false), started(false) {}
virtual ~RGWFormatterFlusher() {}
void flush() {
do_flush();
flushed = true;
}
virtual void start(int client_ret) {
if (!started)
do_start(client_ret);
started = true;
}
Formatter *get_formatter() { return formatter; }
bool did_flush() { return flushed; }
bool did_start() { return started; }
};
class RGWStreamFlusher : public RGWFormatterFlusher {
std::ostream& os;
protected:
void do_flush() override {
formatter->flush(os);
}
public:
RGWStreamFlusher(Formatter *f, std::ostream& _os) : RGWFormatterFlusher(f), os(_os) {}
};
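/*
 * Illustrative sketch: streaming formatter output to an ostream through the
 * flusher interface. JSONFormatter is assumed from common/Formatter.h; the
 * payload is a placeholder.
 *
 *   JSONFormatter formatter(true);
 *   RGWStreamFlusher flusher(&formatter, std::cout);
 *   flusher.start(0);
 *   formatter.open_object_section("result");
 *   formatter.dump_string("status", "ok");
 *   formatter.close_section();
 *   flusher.flush();
 */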
class RGWNullFlusher : public RGWFormatterFlusher {
protected:
void do_flush() override {
}
public:
RGWNullFlusher() : RGWFormatterFlusher(nullptr) {}
};
| 3,776 | 27.186567 | 112 | h |
null | ceph-main/src/rgw/rgw_frontend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <map>
#include <string>
#include <vector>
#include "common/RWLock.h"
#include "rgw_request.h"
#include "rgw_process.h"
#include "rgw_process_env.h"
#include "rgw_realm_reloader.h"
#include "rgw_auth_registry.h"
#include "rgw_sal_rados.h"
#define dout_context g_ceph_context
namespace rgw::dmclock {
class SyncScheduler;
class ClientConfig;
class SchedulerCtx;
}
class RGWFrontendConfig {
std::string config;
std::multimap<std::string, std::string> config_map;
std::string framework;
int parse_config(const std::string& config,
std::multimap<std::string, std::string>& config_map);
public:
explicit RGWFrontendConfig(const std::string& config)
: config(config) {
}
int init() {
const int ret = parse_config(config, config_map);
return ret < 0 ? ret : 0;
}
void set_default_config(RGWFrontendConfig& def_conf);
std::optional<std::string> get_val(const std::string& key);
bool get_val(const std::string& key,
const std::string& def_val,
std::string* out);
bool get_val(const std::string& key, int def_val, int *out);
std::string get_val(const std::string& key,
const std::string& def_val) {
std::string out;
get_val(key, def_val, &out);
return out;
}
const std::string& get_config() {
return config;
}
std::multimap<std::string, std::string>& get_config_map() {
return config_map;
}
std::string get_framework() const {
return framework;
}
};
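/*
 * Illustrative sketch: a frontend config string is a framework name followed
 * by key=value pairs, read back with get_val(). The option values below are
 * placeholders.
 *
 *   RGWFrontendConfig conf("beast port=8000 ssl_certificate=/etc/ssl/rgw.pem");
 *   if (conf.init() < 0) {
 *     return -EINVAL;
 *   }
 *   int port;
 *   conf.get_val("port", 80, &port);                        // -> 8000
 *   std::string cert = conf.get_val("ssl_certificate", ""); // -> the pem path
 */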
class RGWFrontend {
public:
virtual ~RGWFrontend() {}
virtual int init() = 0;
virtual int run() = 0;
virtual void stop() = 0;
virtual void join() = 0;
virtual void pause_for_new_config() = 0;
virtual void unpause_with_new_config() = 0;
};
class RGWProcessFrontend : public RGWFrontend {
protected:
RGWFrontendConfig* conf;
RGWProcess* pprocess;
RGWProcessEnv& env;
RGWProcessControlThread* thread;
public:
RGWProcessFrontend(RGWProcessEnv& pe, RGWFrontendConfig* _conf)
: conf(_conf), pprocess(nullptr), env(pe), thread(nullptr) {
}
~RGWProcessFrontend() override {
delete thread;
delete pprocess;
}
int run() override {
    ceph_assert(pprocess); /* should have been initialized by init() */
thread = new RGWProcessControlThread(pprocess);
thread->create("rgw_frontend");
return 0;
}
void stop() override;
void join() override {
thread->join();
}
void pause_for_new_config() override {
pprocess->pause();
}
void unpause_with_new_config() override {
pprocess->unpause_with_new_config();
}
}; /* RGWProcessFrontend */
class RGWLoadGenFrontend : public RGWProcessFrontend, public DoutPrefixProvider {
public:
RGWLoadGenFrontend(RGWProcessEnv& pe, RGWFrontendConfig *_conf)
: RGWProcessFrontend(pe, _conf) {}
CephContext *get_cct() const {
return env.driver->ctx();
}
unsigned get_subsys() const
{
return ceph_subsys_rgw;
}
std::ostream& gen_prefix(std::ostream& out) const
{
return out << "rgw loadgen frontend: ";
}
int init() override {
int num_threads;
conf->get_val("num_threads", g_conf()->rgw_thread_pool_size, &num_threads);
std::string uri_prefix;
conf->get_val("prefix", "", &uri_prefix);
RGWLoadGenProcess *pp = new RGWLoadGenProcess(
g_ceph_context, env, num_threads, std::move(uri_prefix), conf);
pprocess = pp;
std::string uid_str;
conf->get_val("uid", "", &uid_str);
if (uid_str.empty()) {
derr << "ERROR: uid param must be specified for loadgen frontend"
<< dendl;
return -EINVAL;
}
rgw_user uid(uid_str);
std::unique_ptr<rgw::sal::User> user = env.driver->get_user(uid);
int ret = user->load_user(this, null_yield);
if (ret < 0) {
derr << "ERROR: failed reading user info: uid=" << uid << " ret="
<< ret << dendl;
return ret;
}
auto aiter = user->get_info().access_keys.begin();
if (aiter == user->get_info().access_keys.end()) {
derr << "ERROR: user has no S3 access keys set" << dendl;
return -EINVAL;
}
pp->set_access_key(aiter->second);
return 0;
}
}; /* RGWLoadGenFrontend */
// FrontendPauser implementation for RGWRealmReloader
class RGWFrontendPauser : public RGWRealmReloader::Pauser {
std::vector<RGWFrontend*> &frontends;
RGWRealmReloader::Pauser* pauser;
public:
RGWFrontendPauser(std::vector<RGWFrontend*> &frontends,
RGWRealmReloader::Pauser* pauser = nullptr)
: frontends(frontends), pauser(pauser) {}
void pause() override {
for (auto frontend : frontends)
frontend->pause_for_new_config();
if (pauser)
pauser->pause();
}
void resume(rgw::sal::Driver* driver) override {
for (auto frontend : frontends)
frontend->unpause_with_new_config();
if (pauser)
pauser->resume(driver);
}
};
| 5,004 | 22.608491 | 81 | h |
null | ceph-main/src/rgw/rgw_gc_log.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "include/rados/librados.hpp"
#include "cls/rgw/cls_rgw_types.h"
// initialize the cls_rgw_gc queue
void gc_log_init2(librados::ObjectWriteOperation& op,
uint64_t max_size, uint64_t max_deferred);
// enqueue a gc entry to omap with cls_rgw
void gc_log_enqueue1(librados::ObjectWriteOperation& op,
uint32_t expiration, cls_rgw_gc_obj_info& info);
// enqueue a gc entry to the cls_rgw_gc queue
void gc_log_enqueue2(librados::ObjectWriteOperation& op,
uint32_t expiration, const cls_rgw_gc_obj_info& info);
// defer a gc entry in omap with cls_rgw
void gc_log_defer1(librados::ObjectWriteOperation& op,
uint32_t expiration, const cls_rgw_gc_obj_info& info);
// defer a gc entry in the cls_rgw_gc queue
void gc_log_defer2(librados::ObjectWriteOperation& op,
uint32_t expiration, const cls_rgw_gc_obj_info& info);
| 1,041 | 34.931034 | 75 | h |
null | ceph-main/src/rgw/rgw_http_client.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "common/async/yield_context.h"
#include "common/Cond.h"
#include "rgw_common.h"
#include "rgw_string.h"
#include "rgw_http_client_types.h"
#include <atomic>
using param_pair_t = std::pair<std::string, std::string>;
using param_vec_t = std::vector<param_pair_t>;
void rgw_http_client_init(CephContext *cct);
void rgw_http_client_cleanup();
struct rgw_http_req_data;
class RGWHTTPManager;
class RGWHTTPClient : public RGWIOProvider,
public NoDoutPrefix
{
friend class RGWHTTPManager;
bufferlist send_bl;
bufferlist::iterator send_iter;
bool has_send_len;
long http_status;
bool send_data_hint{false};
size_t receive_pause_skip{0}; /* how many bytes to skip next time receive_data is called
due to being paused */
void *user_info{nullptr};
rgw_http_req_data *req_data;
bool verify_ssl; // Do not validate self signed certificates, default to false
std::string ca_path;
std::string client_cert;
std::string client_key;
std::atomic<unsigned> stopped { 0 };
protected:
CephContext *cct;
std::string method;
std::string url;
std::string protocol;
std::string host;
std::string resource_prefix;
size_t send_len{0};
param_vec_t headers;
long req_timeout{0L};
void init();
RGWHTTPManager *get_manager();
int init_request(rgw_http_req_data *req_data);
virtual int receive_header(void *ptr, size_t len) {
return 0;
}
virtual int receive_data(void *ptr, size_t len, bool *pause) {
return 0;
}
virtual int send_data(void *ptr, size_t len, bool *pause=nullptr) {
return 0;
}
/* Callbacks for libcurl. */
static size_t receive_http_header(void *ptr,
size_t size,
size_t nmemb,
void *_info);
static size_t receive_http_data(void *ptr,
size_t size,
size_t nmemb,
void *_info);
static size_t send_http_data(void *ptr,
size_t size,
size_t nmemb,
void *_info);
ceph::mutex& get_req_lock();
/* needs to be called under req_lock() */
void _set_write_paused(bool pause);
void _set_read_paused(bool pause);
public:
static const long HTTP_STATUS_NOSTATUS = 0;
static const long HTTP_STATUS_UNAUTHORIZED = 401;
static const long HTTP_STATUS_NOTFOUND = 404;
static constexpr int HTTPCLIENT_IO_READ = 0x1;
static constexpr int HTTPCLIENT_IO_WRITE = 0x2;
static constexpr int HTTPCLIENT_IO_CONTROL = 0x4;
virtual ~RGWHTTPClient();
explicit RGWHTTPClient(CephContext *cct,
const std::string& _method,
const std::string& _url);
std::ostream& gen_prefix(std::ostream& out) const override;
void append_header(const std::string& name, const std::string& val) {
headers.push_back(std::pair<std::string, std::string>(name, val));
}
void set_send_length(size_t len) {
send_len = len;
has_send_len = true;
}
void set_send_data_hint(bool hint) {
send_data_hint = hint;
}
long get_http_status() const {
return http_status;
}
void set_http_status(long _http_status) {
http_status = _http_status;
}
void set_verify_ssl(bool flag) {
verify_ssl = flag;
}
// set request timeout in seconds
  // zero (default) means that the request will never time out
void set_req_timeout(long timeout) {
req_timeout = timeout;
}
int process(optional_yield y);
int wait(optional_yield y);
void cancel();
bool is_done();
rgw_http_req_data *get_req_data() { return req_data; }
std::string to_str();
int get_req_retcode();
void set_url(const std::string& _url) {
url = _url;
}
void set_method(const std::string& _method) {
method = _method;
}
void set_io_user_info(void *_user_info) override {
user_info = _user_info;
}
void *get_io_user_info() override {
return user_info;
}
void set_ca_path(const std::string& _ca_path) {
ca_path = _ca_path;
}
void set_client_cert(const std::string& _client_cert) {
client_cert = _client_cert;
}
void set_client_key(const std::string& _client_key) {
client_key = _client_key;
}
};
class RGWHTTPHeadersCollector : public RGWHTTPClient {
public:
typedef std::string header_name_t;
typedef std::string header_value_t;
typedef std::set<header_name_t, ltstr_nocase> header_spec_t;
RGWHTTPHeadersCollector(CephContext * const cct,
const std::string& method,
const std::string& url,
const header_spec_t &relevant_headers)
: RGWHTTPClient(cct, method, url),
relevant_headers(relevant_headers) {
}
std::map<header_name_t, header_value_t, ltstr_nocase> get_headers() const {
return found_headers;
}
/* Throws std::out_of_range */
const header_value_t& get_header_value(const header_name_t& name) const {
return found_headers.at(name);
}
protected:
int receive_header(void *ptr, size_t len) override;
private:
const std::set<header_name_t, ltstr_nocase> relevant_headers;
std::map<header_name_t, header_value_t, ltstr_nocase> found_headers;
};
class RGWHTTPTransceiver : public RGWHTTPHeadersCollector {
bufferlist * const read_bl;
std::string post_data;
size_t post_data_index;
public:
RGWHTTPTransceiver(CephContext * const cct,
const std::string& method,
const std::string& url,
bufferlist * const read_bl,
const header_spec_t intercept_headers = {})
: RGWHTTPHeadersCollector(cct, method, url, intercept_headers),
read_bl(read_bl),
post_data_index(0) {
}
RGWHTTPTransceiver(CephContext * const cct,
const std::string& method,
const std::string& url,
bufferlist * const read_bl,
const bool verify_ssl,
const header_spec_t intercept_headers = {})
: RGWHTTPHeadersCollector(cct, method, url, intercept_headers),
read_bl(read_bl),
post_data_index(0) {
set_verify_ssl(verify_ssl);
}
void set_post_data(const std::string& _post_data) {
this->post_data = _post_data;
}
protected:
int send_data(void* ptr, size_t len, bool *pause=nullptr) override;
int receive_data(void *ptr, size_t len, bool *pause) override {
read_bl->append((char *)ptr, len);
return 0;
}
};
typedef RGWHTTPTransceiver RGWPostHTTPData;
class RGWCompletionManager;
enum RGWHTTPRequestSetState {
SET_NOP = 0,
SET_WRITE_PAUSED = 1,
SET_WRITE_RESUME = 2,
SET_READ_PAUSED = 3,
SET_READ_RESUME = 4,
};
class RGWHTTPManager {
struct set_state {
rgw_http_req_data *req;
int bitmask;
set_state(rgw_http_req_data *_req, int _bitmask) : req(_req), bitmask(_bitmask) {}
};
CephContext *cct;
RGWCompletionManager *completion_mgr;
void *multi_handle;
bool is_started = false;
std::atomic<unsigned> going_down { 0 };
std::atomic<unsigned> is_stopped { 0 };
ceph::shared_mutex reqs_lock = ceph::make_shared_mutex("RGWHTTPManager::reqs_lock");
std::map<uint64_t, rgw_http_req_data *> reqs;
std::list<rgw_http_req_data *> unregistered_reqs;
std::list<set_state> reqs_change_state;
std::map<uint64_t, rgw_http_req_data *> complete_reqs;
int64_t num_reqs = 0;
int64_t max_threaded_req = 0;
int thread_pipe[2];
void register_request(rgw_http_req_data *req_data);
void complete_request(rgw_http_req_data *req_data);
void _complete_request(rgw_http_req_data *req_data);
bool unregister_request(rgw_http_req_data *req_data);
void _unlink_request(rgw_http_req_data *req_data);
void unlink_request(rgw_http_req_data *req_data);
void finish_request(rgw_http_req_data *req_data, int r, long http_status = -1);
void _finish_request(rgw_http_req_data *req_data, int r);
void _set_req_state(set_state& ss);
int link_request(rgw_http_req_data *req_data);
void manage_pending_requests();
class ReqsThread : public Thread {
RGWHTTPManager *manager;
public:
explicit ReqsThread(RGWHTTPManager *_m) : manager(_m) {}
void *entry() override;
};
ReqsThread *reqs_thread = nullptr;
void *reqs_thread_entry();
int signal_thread();
public:
RGWHTTPManager(CephContext *_cct, RGWCompletionManager *completion_mgr = NULL);
~RGWHTTPManager();
int start();
void stop();
int add_request(RGWHTTPClient *client);
int remove_request(RGWHTTPClient *client);
int set_request_state(RGWHTTPClient *client, RGWHTTPRequestSetState state);
};
class RGWHTTP
{
public:
static int send(RGWHTTPClient *req);
static int process(RGWHTTPClient *req, optional_yield y);
};
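/*
 * Illustrative sketch: a blocking GET that collects the response body into a
 * bufferlist via RGWHTTPTransceiver. `cct`, the URL and `y` are placeholders.
 *
 *   bufferlist body;
 *   RGWHTTPTransceiver req(cct, "GET", "http://localhost:8000/", &body);
 *   req.set_req_timeout(30);
 *   int r = RGWHTTP::process(&req, y);
 *   if (r >= 0 && req.get_http_status() == 200) {
 *     // body now holds the payload
 *   }
 */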
| 9,012 | 24.825215 | 90 | h |
null | ceph-main/src/rgw/rgw_http_client_curl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
 * Copyright (C) 2018 SUSE Linux GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <map>
#include <boost/optional.hpp>
#include "rgw_frontend.h"
namespace rgw {
namespace curl {
using fe_map_t = std::multimap <std::string, RGWFrontendConfig *>;
void setup_curl(boost::optional<const fe_map_t&> m);
void cleanup_curl();
}
}
| 680 | 21.7 | 70 | h |