Dataset columns (per-column type and observed value range):

  repo             string   (length 1 to 152)
  file             string   (length 14 to 221)
  code             string   (length 501 to 25k)
  file_length      int64    (501 to 25k)
  avg_line_length  float64  (20 to 99.5)
  max_line_length  int64    (21 to 134)
  extension_type   string   (2 classes)
null
ceph-main/src/librbd/AsioEngine.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_ASIO_ENGINE_H
#define CEPH_LIBRBD_ASIO_ENGINE_H

#include "include/common_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include <memory>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>

struct Context;
namespace neorados { struct RADOS; }

namespace librbd {

namespace asio { struct ContextWQ; }

class AsioEngine {
public:
  explicit AsioEngine(std::shared_ptr<librados::Rados> rados);
  explicit AsioEngine(librados::IoCtx& io_ctx);
  ~AsioEngine();

  AsioEngine(AsioEngine&&) = delete;
  AsioEngine(const AsioEngine&) = delete;
  AsioEngine& operator=(const AsioEngine&) = delete;

  inline neorados::RADOS& get_rados_api() {
    return *m_rados_api;
  }

  inline boost::asio::io_context& get_io_context() {
    return m_io_context;
  }
  inline operator boost::asio::io_context&() {
    return m_io_context;
  }

  using executor_type = boost::asio::io_context::executor_type;
  inline executor_type get_executor() {
    return m_io_context.get_executor();
  }

  inline boost::asio::io_context::strand& get_api_strand() {
    // API client callbacks should never fire concurrently
    return *m_api_strand;
  }

  inline asio::ContextWQ* get_work_queue() {
    return m_context_wq.get();
  }

  template <typename T>
  void dispatch(T&& t) {
    boost::asio::dispatch(m_io_context, std::forward<T>(t));
  }
  void dispatch(Context* ctx, int r);

  template <typename T>
  void post(T&& t) {
    boost::asio::post(m_io_context, std::forward<T>(t));
  }
  void post(Context* ctx, int r);

private:
  std::shared_ptr<neorados::RADOS> m_rados_api;
  CephContext* m_cct;

  boost::asio::io_context& m_io_context;
  std::unique_ptr<boost::asio::io_context::strand> m_api_strand;
  std::unique_ptr<asio::ContextWQ> m_context_wq;
};

} // namespace librbd

#endif // CEPH_LIBRBD_ASIO_ENGINE_H
2,015
23.888889
70
h
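Editor's note: AsioEngine.h above is a thin wrapper that forwards dispatch()/post() calls to a boost::asio::io_context. A minimal, self-contained sketch of that forwarding pattern using plain Boost.Asio, not librbd itself (the lambdas and output are illustrative only):

#include <boost/asio/dispatch.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <iostream>

int main() {
  boost::asio::io_context io_context;

  // post() never runs the handler inline; it is queued until run() executes.
  boost::asio::post(io_context, [] { std::cout << "posted handler\n"; });

  // dispatch() may run the handler immediately if the caller is already
  // inside the io_context's run loop; from here it queues like post().
  boost::asio::dispatch(io_context, [] { std::cout << "dispatched handler\n"; });

  io_context.run();
  return 0;
}

The header additionally funnels API client callbacks through a single strand (get_api_strand()), the standard Boost.Asio way to guarantee that callbacks never fire concurrently.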
null
ceph-main/src/librbd/AsyncObjectThrottle.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H
#define CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H

#include "include/int_types.h"
#include "include/Context.h"
#include <boost/function.hpp>

namespace librbd {

template <typename ImageCtxT> class AsyncRequest;
class ProgressContext;
struct ImageCtx;

class AsyncObjectThrottleFinisher {
public:
  virtual ~AsyncObjectThrottleFinisher() {};
  virtual void finish_op(int r) = 0;
};

template <typename ImageCtxT = ImageCtx>
class C_AsyncObjectThrottle : public Context {
public:
  C_AsyncObjectThrottle(AsyncObjectThrottleFinisher &finisher,
                        ImageCtxT &image_ctx)
    : m_image_ctx(image_ctx), m_finisher(finisher) {
  }

  virtual int send() = 0;

protected:
  ImageCtxT &m_image_ctx;

  void finish(int r) override {
    m_finisher.finish_op(r);
  }

private:
  AsyncObjectThrottleFinisher &m_finisher;
};

template <typename ImageCtxT = ImageCtx>
class AsyncObjectThrottle : public AsyncObjectThrottleFinisher {
public:
  typedef boost::function<
    C_AsyncObjectThrottle<ImageCtxT>* (AsyncObjectThrottle&,
                                       uint64_t)> ContextFactory;

  AsyncObjectThrottle(const AsyncRequest<ImageCtxT> *async_request,
                      ImageCtxT &image_ctx,
                      const ContextFactory& context_factory, Context *ctx,
                      ProgressContext *prog_ctx, uint64_t object_no,
                      uint64_t end_object_no);

  void start_ops(uint64_t max_concurrent);
  void finish_op(int r) override;

private:
  ceph::mutex m_lock;
  const AsyncRequest<ImageCtxT> *m_async_request;
  ImageCtxT &m_image_ctx;
  ContextFactory m_context_factory;
  Context *m_ctx;
  ProgressContext *m_prog_ctx;
  uint64_t m_object_no;
  uint64_t m_end_object_no;
  uint64_t m_current_ops;
  int m_ret;

  void start_next_op();
};

} // namespace librbd

extern template class librbd::AsyncObjectThrottle<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H
2,043
24.55
74
h
null
ceph-main/src/librbd/BlockGuard.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IO_BLOCK_GUARD_H
#define CEPH_LIBRBD_IO_BLOCK_GUARD_H

#include "include/int_types.h"
#include "common/dout.h"
#include "common/ceph_mutex.h"
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/set.hpp>
#include <deque>
#include <list>
#include "include/ceph_assert.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::BlockGuard: " << this << " " \
                           << __func__ << ": "

namespace librbd {

struct BlockExtent {
  // [block_start, block_end)
  uint64_t block_start = 0;
  uint64_t block_end = 0;

  BlockExtent() {
  }
  BlockExtent(uint64_t block_start, uint64_t block_end)
    : block_start(block_start), block_end(block_end) {
  }

  friend std::ostream& operator<< (std::ostream& os,
                                   const BlockExtent& block_extent) {
    os << "[block_start=" << block_extent.block_start
       << ", block_end=" << block_extent.block_end << "]";
    return os;
  }
};

struct BlockGuardCell {
};

/**
 * Helper class to restrict and order concurrent IO to the same block. The
 * definition of a block is dependent upon the user of this class. It might
 * represent a backing object, 512 byte sectors, etc.
 */
template <typename BlockOperation>
class BlockGuard {
private:
  struct DetainedBlockExtent;

public:
  typedef std::list<BlockOperation> BlockOperations;

  BlockGuard(CephContext *cct)
    : m_cct(cct) {
  }

  BlockGuard(const BlockGuard&) = delete;
  BlockGuard &operator=(const BlockGuard&) = delete;

  /**
   * Detain future IO for a range of blocks. the guard will keep
   * ownership of the provided operation if the operation is blocked.
   * @return 0 upon success and IO can be issued
   *         >0 if the IO is blocked,
   *         <0 upon error
   */
  int detain(const BlockExtent &block_extent, BlockOperation *block_operation,
             BlockGuardCell **cell) {
    std::lock_guard locker{m_lock};
    ldout(m_cct, 20) << block_extent << ", free_slots="
                     << m_free_detained_block_extents.size() << dendl;

    DetainedBlockExtent *detained_block_extent;
    auto it = m_detained_block_extents.find(block_extent);
    if (it != m_detained_block_extents.end()) {
      // request against an already detained block
      detained_block_extent = &(*it);
      if (block_operation != nullptr) {
        detained_block_extent->block_operations.emplace_back(
          std::move(*block_operation));
      }

      // alert the caller that the IO was detained
      *cell = nullptr;
      return detained_block_extent->block_operations.size();
    } else {
      if (!m_free_detained_block_extents.empty()) {
        detained_block_extent = &m_free_detained_block_extents.front();
        detained_block_extent->block_operations.clear();
        m_free_detained_block_extents.pop_front();
      } else {
        ldout(m_cct, 20) << "no free detained block cells" << dendl;
        m_detained_block_extent_pool.emplace_back();
        detained_block_extent = &m_detained_block_extent_pool.back();
      }

      detained_block_extent->block_extent = block_extent;
      m_detained_block_extents.insert(*detained_block_extent);
      *cell = reinterpret_cast<BlockGuardCell*>(detained_block_extent);
      return 0;
    }
  }

  /**
   * Release any detained IO operations from the provided cell.
   */
  void release(BlockGuardCell *cell, BlockOperations *block_operations) {
    std::lock_guard locker{m_lock};

    ceph_assert(cell != nullptr);
    auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
      *cell);
    ldout(m_cct, 20) << detained_block_extent.block_extent << ", pending_ops="
                     << detained_block_extent.block_operations.size()
                     << dendl;

    *block_operations = std::move(detained_block_extent.block_operations);
    m_detained_block_extents.erase(detained_block_extent.block_extent);
    m_free_detained_block_extents.push_back(detained_block_extent);
  }

private:
  struct DetainedBlockExtent : public boost::intrusive::list_base_hook<>,
                               public boost::intrusive::set_base_hook<> {
    BlockExtent block_extent;
    BlockOperations block_operations;
  };

  struct DetainedBlockExtentKey {
    typedef BlockExtent type;
    const BlockExtent &operator()(const DetainedBlockExtent &value) {
      return value.block_extent;
    }
  };

  struct DetainedBlockExtentCompare {
    bool operator()(const BlockExtent &lhs,
                    const BlockExtent &rhs) const {
      // check for range overlap (lhs < rhs)
      if (lhs.block_end <= rhs.block_start) {
        return true;
      }
      return false;
    }
  };

  typedef std::deque<DetainedBlockExtent> DetainedBlockExtentsPool;
  typedef boost::intrusive::list<DetainedBlockExtent> DetainedBlockExtents;
  typedef boost::intrusive::set<
    DetainedBlockExtent,
    boost::intrusive::compare<DetainedBlockExtentCompare>,
    boost::intrusive::key_of_value<DetainedBlockExtentKey> >
      BlockExtentToDetainedBlockExtents;

  CephContext *m_cct;
  ceph::mutex m_lock = ceph::make_mutex("librbd::BlockGuard::m_lock");
  DetainedBlockExtentsPool m_detained_block_extent_pool;
  DetainedBlockExtents m_free_detained_block_extents;
  BlockExtentToDetainedBlockExtents m_detained_block_extents;
};

} // namespace librbd

#undef dout_subsys
#undef dout_prefix
#define dout_prefix *_dout

#endif // CEPH_LIBRBD_IO_BLOCK_GUARD_H
5,596
30.44382
87
h
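Editor's note: the detain()/release() contract documented in BlockGuard.h above (0 = proceed, >0 = queued behind an in-flight overlapping extent) can be illustrated with a simplified, self-contained analogue. SimpleBlockGuard below is a hypothetical stand-in, not the librbd implementation; it uses a plain std::map instead of pooled cells and intrusive containers:

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <mutex>
#include <utility>
#include <vector>

// Simplified analogue of the block-guard pattern: detain an extent, queue
// overlapping operations behind it, re-dispatch them on release.
struct SimpleBlockGuard {
  using Op = std::function<void()>;

  // Returns 0 if [start, end) was free and is now detained by the caller,
  // or >0 (the queue depth) if it overlapped a detained extent and the
  // operation was queued behind it.
  int detain(uint64_t start, uint64_t end, Op op) {
    std::lock_guard<std::mutex> locker(m_mutex);
    for (auto& [key, cell] : m_cells) {
      if (start < cell.end && key < end) {  // [start,end) overlaps [key,cell.end)
        if (op) {
          cell.pending.push_back(std::move(op));
        }
        return static_cast<int>(cell.pending.size());
      }
    }
    m_cells[start] = Cell{end, {}};
    return 0;
  }

  // Releases the extent starting at 'start' and returns the operations that
  // were queued behind it so the caller can re-dispatch them.
  std::vector<Op> release(uint64_t start) {
    std::lock_guard<std::mutex> locker(m_mutex);
    auto it = m_cells.find(start);
    if (it == m_cells.end()) {
      return {};
    }
    std::vector<Op> pending = std::move(it->second.pending);
    m_cells.erase(it);
    return pending;
  }

private:
  struct Cell {
    uint64_t end;
    std::vector<Op> pending;
  };

  std::mutex m_mutex;
  std::map<uint64_t, Cell> m_cells;  // block_start -> detained cell
};

int main() {
  SimpleBlockGuard guard;
  guard.detain(0, 4, {});  // first IO proceeds immediately (returns 0)
  guard.detain(2, 6, [] { std::cout << "blocked IO resumed\n"; });  // queued
  for (auto& op : guard.release(0)) {  // first IO completes
    op();  // re-dispatch the queued, overlapping IO
  }
  return 0;
}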
null
ceph-main/src/librbd/DeepCopyRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_REQUEST_H

#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "include/int_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"

#include <map>
#include <vector>

class Context;

namespace librbd {

class ImageCtx;
namespace asio { struct ContextWQ; }

namespace deep_copy {

template <typename> class ImageCopyRequest;
template <typename> class SnapshotCopyRequest;
struct Handler;

}

template <typename ImageCtxT = ImageCtx>
class DeepCopyRequest : public RefCountedObject {
public:
  static DeepCopyRequest* create(ImageCtxT *src_image_ctx,
                                 ImageCtxT *dst_image_ctx,
                                 librados::snap_t src_snap_id_start,
                                 librados::snap_t src_snap_id_end,
                                 librados::snap_t dst_snap_id_start,
                                 bool flatten,
                                 const deep_copy::ObjectNumber &object_number,
                                 asio::ContextWQ *work_queue,
                                 SnapSeqs *snap_seqs,
                                 deep_copy::Handler *handler,
                                 Context *on_finish) {
    return new DeepCopyRequest(src_image_ctx, dst_image_ctx, src_snap_id_start,
                               src_snap_id_end, dst_snap_id_start, flatten,
                               object_number, work_queue, snap_seqs, handler,
                               on_finish);
  }

  DeepCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                  librados::snap_t src_snap_id_start,
                  librados::snap_t src_snap_id_end,
                  librados::snap_t dst_snap_id_start,
                  bool flatten, const deep_copy::ObjectNumber &object_number,
                  asio::ContextWQ *work_queue, SnapSeqs *snap_seqs,
                  deep_copy::Handler *handler, Context *on_finish);
  ~DeepCopyRequest();

  void send();
  void cancel();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * COPY_SNAPSHOTS
   *    |
   *    v
   * COPY_IMAGE . . . . . . . . . . . . . .
   *    |                                 .
   *    v                                 .
   * COPY_OBJECT_MAP (skip if object      .
   *    |             map disabled)       .
   *    v                                 .
   * REFRESH_OBJECT_MAP (skip if object   . (image copy canceled)
   *    |                map disabled)    .
   *    v                                 .
   * COPY_METADATA                        .
   *    |                                 .
   *    v                                 .
   * <finish> < . . . . . . . . . . . . . .
   *
   * @endverbatim
   */

  typedef std::vector<librados::snap_t> SnapIds;
  typedef std::map<librados::snap_t, SnapIds> SnapMap;

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_src_snap_id_end;
  librados::snap_t m_dst_snap_id_start;
  bool m_flatten;
  deep_copy::ObjectNumber m_object_number;
  asio::ContextWQ *m_work_queue;
  SnapSeqs *m_snap_seqs;
  deep_copy::Handler *m_handler;
  Context *m_on_finish;

  CephContext *m_cct;
  ceph::mutex m_lock;
  bool m_canceled = false;

  deep_copy::SnapshotCopyRequest<ImageCtxT> *m_snapshot_copy_request = nullptr;
  deep_copy::ImageCopyRequest<ImageCtxT> *m_image_copy_request = nullptr;
  decltype(ImageCtxT::object_map) m_object_map = nullptr;

  void send_copy_snapshots();
  void handle_copy_snapshots(int r);

  void send_copy_image();
  void handle_copy_image(int r);

  void send_copy_object_map();
  void handle_copy_object_map(int r);

  void send_refresh_object_map();
  void handle_refresh_object_map(int r);

  void send_copy_metadata();
  void handle_copy_metadata(int r);

  int validate_copy_points();

  void finish(int r);
};

} // namespace librbd

extern template class librbd::DeepCopyRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_DEEP_COPY_REQUEST_H
4,194
29.179856
79
h
null
ceph-main/src/librbd/ExclusiveLock.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_H

#include "common/AsyncOpTracker.h"
#include "librbd/ManagedLock.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/io/Types.h"
#include "common/RefCountedObj.h"

struct Context;

namespace librbd {

namespace exclusive_lock { template <typename> struct ImageDispatch; }

template <typename ImageCtxT = ImageCtx>
class ExclusiveLock : public RefCountedObject,
                      public ManagedLock<ImageCtxT> {
public:
  static ExclusiveLock *create(ImageCtxT &image_ctx) {
    return new ExclusiveLock<ImageCtxT>(image_ctx);
  }

  ExclusiveLock(ImageCtxT &image_ctx);

  bool accept_request(exclusive_lock::OperationRequestType request_type,
                      int *ret_val) const;
  bool accept_ops() const;

  void set_require_lock(bool init_shutdown, io::Direction direction,
                        Context* on_finish);
  void unset_require_lock(io::Direction direction);

  void block_requests(int r);
  void unblock_requests();

  void init(uint64_t features, Context *on_init);
  void shut_down(Context *on_shutdown);

  void handle_peer_notification(int r);

  int get_unlocked_op_error() const;
  Context *start_op(int* ret_val);

protected:
  void shutdown_handler(int r, Context *on_finish) override;
  void pre_acquire_lock_handler(Context *on_finish) override;
  void post_acquire_lock_handler(int r, Context *on_finish) override;
  void pre_release_lock_handler(bool shutting_down,
                                Context *on_finish) override;
  void post_release_lock_handler(bool shutting_down, int r,
                                 Context *on_finish) override;
  void post_reacquire_lock_handler(int r, Context *on_finish) override;

private:
  /**
   * @verbatim
   *
   * <start>                              * * > WAITING_FOR_REGISTER --------\
   *    |                                 * (watch not registered)           |
   *    |                                 *                                  |
   *    |                                 * * > WAITING_FOR_PEER ------------\
   *    |                                 * (request_lock busy)              |
   *    |                                 *                                  |
   *    |                                 * * * * * * * * * * * * * *       |
   *    |                                                           *       |
   *    v           (init)            (try_lock/request_lock)       *       |
   * UNINITIALIZED -------> UNLOCKED ------------------------> ACQUIRING <--/
   *                            ^                                   |
   *                            |                                   v
   *                         RELEASING                        POST_ACQUIRING
   *                            |                                   |
   *                            |                                   |
   *                            |          (release_lock)           v
   *                      PRE_RELEASING <------------------------ LOCKED
   *
   * <LOCKED state>
   *    |
   *    v
   * REACQUIRING -------------------------------------> <finish>
   *    .                                                 ^
   *    .                                                 |
   *    . . . > <RELEASE action> ---> <ACQUIRE action> ---/
   *
   * <UNLOCKED/LOCKED states>
   *    |
   *    |
   *    v
   * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
   *
   * @endverbatim
   */

  ImageCtxT& m_image_ctx;
  exclusive_lock::ImageDispatch<ImageCtxT>* m_image_dispatch = nullptr;
  Context *m_pre_post_callback = nullptr;

  AsyncOpTracker m_async_op_tracker;

  uint32_t m_request_blocked_count = 0;
  int m_request_blocked_ret_val = 0;

  int m_acquire_lock_peer_ret_val = 0;

  bool accept_ops(const ceph::mutex &lock) const;

  void handle_post_acquiring_lock(int r);
  void handle_post_acquired_lock(int r);
};

} // namespace librbd

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_H
4,124
33.957627
78
h
null
ceph-main/src/librbd/ImageCtx.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGECTX_H
#define CEPH_LIBRBD_IMAGECTX_H

#include "include/int_types.h"

#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "common/Timer.h"
#include "common/ceph_mutex.h"
#include "common/config_proxy.h"
#include "common/event_socket.h"
#include "common/Readahead.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"

#include "include/common_fwd.h"
#include "include/buffer_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rbd_types.h"
#include "include/types.h"
#include "include/xlist.h"

#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsyncRequest.h"
#include "librbd/Types.h"

#include <boost/lockfree/policies.hpp>
#include <boost/lockfree/queue.hpp>

namespace neorados {
class IOContext;
class RADOS;
} // namespace neorados

namespace librbd {

struct AsioEngine;
template <typename> class ConfigWatcher;
template <typename> class ExclusiveLock;
template <typename> class ImageState;
template <typename> class ImageWatcher;
template <typename> class Journal;
class LibrbdAdminSocketHook;
template <typename> class ObjectMap;
template <typename> class Operations;
template <typename> class PluginRegistry;

namespace asio { struct ContextWQ; }
namespace crypto { template <typename> class EncryptionFormat; }
namespace exclusive_lock { struct Policy; }
namespace io {
class AioCompletion;
class AsyncOperation;
template <typename> class CopyupRequest;
enum class ImageArea;
struct ImageDispatcherInterface;
struct ObjectDispatcherInterface;
}
namespace journal { struct Policy; }
namespace operation {
template <typename> class ResizeRequest;
}

struct ImageCtx {
  typedef std::pair<cls::rbd::SnapshotNamespace, std::string> SnapKey;
  struct SnapKeyComparator {
    inline bool operator()(const SnapKey& lhs, const SnapKey& rhs) const {
      // only compare by namespace type and name
      if (lhs.first.index() != rhs.first.index()) {
        return lhs.first.index() < rhs.first.index();
      }
      return lhs.second < rhs.second;
    }
  };

  static const std::string METADATA_CONF_PREFIX;

  CephContext *cct;
  ConfigProxy config;
  std::set<std::string> config_overrides;

  PerfCounters *perfcounter;
  struct rbd_obj_header_ondisk header;
  ::SnapContext snapc;
  std::vector<librados::snap_t> snaps; // this mirrors snapc.snaps, but is in
                                       // a format librados can understand
  std::map<librados::snap_t, SnapInfo> snap_info;
  std::map<SnapKey, librados::snap_t, SnapKeyComparator> snap_ids;
  uint64_t open_snap_id = CEPH_NOSNAP;
  uint64_t snap_id;
  bool snap_exists; // false if our snap_id was deleted

  // whether the image was opened read-only. cannot be changed after opening
  bool read_only;
  uint32_t read_only_flags = 0U;
  uint32_t read_only_mask = ~0U;

  std::map<rados::cls::lock::locker_id_t,
           rados::cls::lock::locker_info_t> lockers;
  bool exclusive_locked;
  std::string lock_tag;

  std::string name;
  cls::rbd::SnapshotNamespace snap_namespace;
  std::string snap_name;

  std::shared_ptr<AsioEngine> asio_engine;

  // New ASIO-style RADOS API
  neorados::RADOS& rados_api;

  // Legacy RADOS API
  librados::IoCtx data_ctx;
  librados::IoCtx md_ctx;

  ConfigWatcher<ImageCtx> *config_watcher = nullptr;
  ImageWatcher<ImageCtx> *image_watcher;
  Journal<ImageCtx> *journal;

  /**
   * Lock ordering:
   *
   * owner_lock, image_lock
   * async_op_lock, timestamp_lock
   */
  ceph::shared_mutex owner_lock; // protects exclusive lock leadership updates
  mutable ceph::shared_mutex image_lock; // protects snapshot-related member
                        // variables, features (and associated helper
                        // classes), and flags
                        // protects access to the mutable image metadata that
                        // isn't guarded by other locks below, and blocks
                        // writes when held exclusively, so snapshots can be
                        // consistent. Fields guarded include:
                        // total_bytes_read
                        // exclusive_locked
                        // lock_tag
                        // lockers
                        // object_map
                        // parent_md and parent
                        // encryption_format

  ceph::shared_mutex timestamp_lock; // protects (create/access/modify)_timestamp
  ceph::mutex async_ops_lock; // protects async_ops and async_requests
  ceph::mutex copyup_list_lock; // protects copyup_waiting_list

  unsigned extra_read_flags; // librados::OPERATION_*

  bool old_format;
  uint8_t order;
  uint64_t size;
  uint64_t features;
  std::string object_prefix;
  char *format_string;
  std::string header_oid;
  std::string id; // only used for new-format images
  ParentImageInfo parent_md;
  ImageCtx *parent;
  ImageCtx *child = nullptr;
  MigrationInfo migration_info;
  cls::rbd::GroupSpec group_spec;
  uint64_t stripe_unit, stripe_count;
  uint64_t flags;
  uint64_t op_features = 0;
  bool operations_disabled = false;
  utime_t create_timestamp;
  utime_t access_timestamp;
  utime_t modify_timestamp;

  file_layout_t layout;

  Readahead readahead;
  std::atomic<uint64_t> total_bytes_read = {0};

  std::map<uint64_t, io::CopyupRequest<ImageCtx>*> copyup_list;

  xlist<io::AsyncOperation*> async_ops;
  xlist<AsyncRequest<>*> async_requests;
  std::list<Context*> async_requests_waiters;

  ImageState<ImageCtx> *state;
  Operations<ImageCtx> *operations;

  ExclusiveLock<ImageCtx> *exclusive_lock;
  ObjectMap<ImageCtx> *object_map;

  xlist<operation::ResizeRequest<ImageCtx>*> resize_reqs;

  io::ImageDispatcherInterface *io_image_dispatcher = nullptr;
  io::ObjectDispatcherInterface *io_object_dispatcher = nullptr;

  asio::ContextWQ *op_work_queue;

  PluginRegistry<ImageCtx>* plugin_registry;

  using Completions = boost::lockfree::queue<io::AioCompletion*>;

  Completions event_socket_completions;
  EventSocket event_socket;

  bool ignore_migrating = false;
  bool disable_zero_copy = false;
  bool enable_sparse_copyup = false;

  /// Cached latency-sensitive configuration settings
  bool non_blocking_aio;
  bool cache;
  uint64_t sparse_read_threshold_bytes;
  uint64_t readahead_max_bytes = 0;
  uint64_t readahead_disable_after_bytes = 0;
  bool clone_copy_on_read;
  bool enable_alloc_hint;
  uint32_t alloc_hint_flags = 0U;
  uint32_t read_flags = 0U; // librados::OPERATION_*
  uint32_t discard_granularity_bytes = 0;
  bool blkin_trace_all;
  uint64_t mirroring_replay_delay;
  uint64_t mtime_update_interval;
  uint64_t atime_update_interval;

  LibrbdAdminSocketHook *asok_hook;

  exclusive_lock::Policy *exclusive_lock_policy = nullptr;
  journal::Policy *journal_policy = nullptr;

  ZTracer::Endpoint trace_endpoint;

  std::unique_ptr<crypto::EncryptionFormat<ImageCtx>> encryption_format;

  // unit test mock helpers
  static ImageCtx* create(const std::string &image_name,
                          const std::string &image_id,
                          const char *snap, IoCtx& p, bool read_only) {
    return new ImageCtx(image_name, image_id, snap, p, read_only);
  }
  static ImageCtx* create(const std::string &image_name,
                          const std::string &image_id,
                          librados::snap_t snap_id, IoCtx& p,
                          bool read_only) {
    return new ImageCtx(image_name, image_id, snap_id, p, read_only);
  }

  /**
   * Either image_name or image_id must be set.
   * If id is not known, pass the empty std::string,
   * and init() will look it up.
   */
  ImageCtx(const std::string &image_name, const std::string &image_id,
           const char *snap, IoCtx& p, bool read_only);
  ImageCtx(const std::string &image_name, const std::string &image_id,
           librados::snap_t snap_id, IoCtx& p, bool read_only);
  ~ImageCtx();

  void init();
  void shutdown();
  void init_layout(int64_t pool_id);
  void perf_start(std::string name);
  void perf_stop();
  void set_read_flag(unsigned flag);
  int get_read_flags(librados::snap_t snap_id);
  int snap_set(uint64_t snap_id);
  void snap_unset();
  librados::snap_t get_snap_id(
      const cls::rbd::SnapshotNamespace& in_snap_namespace,
      const std::string& in_snap_name) const;
  const SnapInfo* get_snap_info(librados::snap_t in_snap_id) const;
  int get_snap_name(librados::snap_t in_snap_id,
                    std::string *out_snap_name) const;
  int get_snap_namespace(librados::snap_t in_snap_id,
                         cls::rbd::SnapshotNamespace *out_snap_namespace) const;
  int get_parent_spec(librados::snap_t in_snap_id,
                      cls::rbd::ParentImageSpec *pspec) const;
  int is_snap_protected(librados::snap_t in_snap_id,
                        bool *is_protected) const;
  int is_snap_unprotected(librados::snap_t in_snap_id,
                          bool *is_unprotected) const;

  uint64_t get_current_size() const;
  uint64_t get_object_size() const;
  std::string get_object_name(uint64_t num) const;
  uint64_t get_stripe_unit() const;
  uint64_t get_stripe_count() const;
  uint64_t get_stripe_period() const;
  utime_t get_create_timestamp() const;
  utime_t get_access_timestamp() const;
  utime_t get_modify_timestamp() const;

  void set_access_timestamp(utime_t at);
  void set_modify_timestamp(utime_t at);

  void add_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
                std::string in_snap_name,
                librados::snap_t id,
                uint64_t in_size, const ParentImageInfo &parent,
                uint8_t protection_status, uint64_t flags, utime_t timestamp);
  void rm_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
               std::string in_snap_name,
               librados::snap_t id);
  uint64_t get_image_size(librados::snap_t in_snap_id) const;
  uint64_t get_area_size(io::ImageArea area) const;
  uint64_t get_object_count(librados::snap_t in_snap_id) const;
  bool test_features(uint64_t test_features) const;
  bool test_features(uint64_t test_features,
                     const ceph::shared_mutex &in_image_lock) const;
  bool test_op_features(uint64_t op_features) const;
  bool test_op_features(uint64_t op_features,
                        const ceph::shared_mutex &in_image_lock) const;
  int get_flags(librados::snap_t in_snap_id, uint64_t *flags) const;
  int test_flags(librados::snap_t in_snap_id,
                 uint64_t test_flags, bool *flags_set) const;
  int test_flags(librados::snap_t in_snap_id,
                 uint64_t test_flags,
                 const ceph::shared_mutex &in_image_lock,
                 bool *flags_set) const;
  int update_flags(librados::snap_t in_snap_id, uint64_t flag, bool enabled);

  const ParentImageInfo* get_parent_info(librados::snap_t in_snap_id) const;
  int64_t get_parent_pool_id(librados::snap_t in_snap_id) const;
  std::string get_parent_image_id(librados::snap_t in_snap_id) const;
  uint64_t get_parent_snap_id(librados::snap_t in_snap_id) const;
  int get_parent_overlap(librados::snap_t in_snap_id,
                         uint64_t* raw_overlap) const;
  std::pair<uint64_t, io::ImageArea> reduce_parent_overlap(
      uint64_t raw_overlap, bool migration_write) const;
  uint64_t prune_parent_extents(
      std::vector<std::pair<uint64_t, uint64_t>>& image_extents,
      io::ImageArea area, uint64_t raw_overlap, bool migration_write) const;

  void register_watch(Context *on_finish);

  void cancel_async_requests();
  void cancel_async_requests(Context *on_finish);

  void apply_metadata(const std::map<std::string, bufferlist> &meta,
                      bool thread_safe);

  ExclusiveLock<ImageCtx> *create_exclusive_lock();
  ObjectMap<ImageCtx> *create_object_map(uint64_t snap_id);
  Journal<ImageCtx> *create_journal();

  void set_image_name(const std::string &name);

  void notify_update();
  void notify_update(Context *on_finish);

  exclusive_lock::Policy *get_exclusive_lock_policy() const;
  void set_exclusive_lock_policy(exclusive_lock::Policy *policy);

  journal::Policy *get_journal_policy() const;
  void set_journal_policy(journal::Policy *policy);

  void rebuild_data_io_context();
  IOContext get_data_io_context() const;
  IOContext duplicate_data_io_context() const;

  static void get_timer_instance(CephContext *cct, SafeTimer **timer,
                                 ceph::mutex **timer_lock);

private:
  std::shared_ptr<neorados::IOContext> data_io_context;
};

}

#endif
12,929
34.04065
89
h
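Editor's note: ImageCtx.h above orders snapshots with SnapKeyComparator, first by the namespace variant's index and then by name. A small self-contained sketch of that ordering; UserSnapshotNamespace and TrashSnapshotNamespace are hypothetical stand-ins for cls::rbd's real types, and only the variant index matters here:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <variant>

// Hypothetical stand-ins for the snapshot namespace types.
struct UserSnapshotNamespace {};
struct TrashSnapshotNamespace {};
using SnapshotNamespace = std::variant<UserSnapshotNamespace,
                                       TrashSnapshotNamespace>;

using SnapKey = std::pair<SnapshotNamespace, std::string>;

// Mirrors the comparator's logic: order by namespace type (variant index),
// then by snapshot name.
struct SnapKeyComparator {
  bool operator()(const SnapKey& lhs, const SnapKey& rhs) const {
    if (lhs.first.index() != rhs.first.index()) {
      return lhs.first.index() < rhs.first.index();
    }
    return lhs.second < rhs.second;
  }
};

int main() {
  std::map<SnapKey, uint64_t, SnapKeyComparator> snap_ids;
  snap_ids[{UserSnapshotNamespace{}, "snap-b"}] = 2;
  snap_ids[{UserSnapshotNamespace{}, "snap-a"}] = 1;
  snap_ids[{TrashSnapshotNamespace{}, "snap-a"}] = 3;

  // Iterates user-namespace snapshots (sorted by name) before trash ones.
  for (const auto& [key, id] : snap_ids) {
    std::cout << "snap " << key.second << " -> id " << id << "\n";
  }
  return 0;
}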
null
ceph-main/src/librbd/ImageState.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGE_STATE_H
#define CEPH_LIBRBD_IMAGE_STATE_H

#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include <list>
#include <string>
#include <utility>
#include "cls/rbd/cls_rbd_types.h"

class Context;
class RWLock;

namespace librbd {

class QuiesceWatchCtx;
class QuiesceWatchers;
class ImageCtx;
class ImageUpdateWatchers;
class UpdateWatchCtx;

template <typename ImageCtxT = ImageCtx>
class ImageState {
public:
  ImageState(ImageCtxT *image_ctx);
  ~ImageState();

  int open(uint64_t flags);
  void open(uint64_t flags, Context *on_finish);

  int close();
  void close(Context *on_finish);

  void handle_update_notification();

  bool is_refresh_required() const;
  int refresh();
  int refresh_if_required();
  void refresh(Context *on_finish);

  void snap_set(uint64_t snap_id, Context *on_finish);

  void prepare_lock(Context *on_ready);
  void handle_prepare_lock_complete();

  int register_update_watcher(UpdateWatchCtx *watcher, uint64_t *handle);
  void unregister_update_watcher(uint64_t handle, Context *on_finish);
  int unregister_update_watcher(uint64_t handle);
  void flush_update_watchers(Context *on_finish);
  void shut_down_update_watchers(Context *on_finish);

  int register_quiesce_watcher(QuiesceWatchCtx *watcher, uint64_t *handle);
  int unregister_quiesce_watcher(uint64_t handle);
  void notify_quiesce(Context *on_finish);
  void notify_unquiesce(Context *on_finish);
  void quiesce_complete(uint64_t handle, int r);

private:
  enum State {
    STATE_UNINITIALIZED,
    STATE_OPEN,
    STATE_CLOSED,
    STATE_OPENING,
    STATE_CLOSING,
    STATE_REFRESHING,
    STATE_SETTING_SNAP,
    STATE_PREPARING_LOCK
  };

  enum ActionType {
    ACTION_TYPE_OPEN,
    ACTION_TYPE_CLOSE,
    ACTION_TYPE_REFRESH,
    ACTION_TYPE_SET_SNAP,
    ACTION_TYPE_LOCK
  };

  struct Action {
    ActionType action_type;
    uint64_t refresh_seq = 0;
    uint64_t snap_id = CEPH_NOSNAP;
    Context *on_ready = nullptr;

    Action(ActionType action_type) : action_type(action_type) {
    }
    inline bool operator==(const Action &action) const {
      if (action_type != action.action_type) {
        return false;
      }
      switch (action_type) {
      case ACTION_TYPE_REFRESH:
        return (refresh_seq == action.refresh_seq);
      case ACTION_TYPE_SET_SNAP:
        return (snap_id == action.snap_id);
      case ACTION_TYPE_LOCK:
        return false;
      default:
        return true;
      }
    }
  };

  typedef std::list<Context *> Contexts;
  typedef std::pair<Action, Contexts> ActionContexts;
  typedef std::list<ActionContexts> ActionsContexts;

  ImageCtxT *m_image_ctx;
  State m_state;

  mutable ceph::mutex m_lock;
  ActionsContexts m_actions_contexts;

  uint64_t m_last_refresh;
  uint64_t m_refresh_seq;

  ImageUpdateWatchers *m_update_watchers;
  QuiesceWatchers *m_quiesce_watchers;

  uint64_t m_open_flags;

  bool is_transition_state() const;
  bool is_closed() const;

  const Action *find_pending_refresh() const;

  void append_context(const Action &action, Context *context);
  void execute_next_action_unlock();
  void execute_action_unlock(const Action &action, Context *context);
  void complete_action_unlock(State next_state, int r);

  void send_open_unlock();
  void handle_open(int r);

  void send_close_unlock();
  void handle_close(int r);

  void send_refresh_unlock();
  void handle_refresh(int r);

  void send_set_snap_unlock();
  void handle_set_snap(int r);

  void send_prepare_lock_unlock();
};

} // namespace librbd

extern template class librbd::ImageState<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IMAGE_STATE_H
3,735
22.948718
75
h
null
ceph-main/src/librbd/ImageWatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGE_WATCHER_H
#define CEPH_LIBRBD_IMAGE_WATCHER_H

#include "cls/rbd/cls_rbd_types.h"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rbd/librbd.hpp"
#include "librbd/Operations.h"
#include "librbd/Watcher.h"
#include "librbd/WatchNotifyTypes.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/internal.h"
#include <functional>
#include <set>
#include <string>
#include <utility>

class entity_name_t;

namespace librbd {

class ImageCtx;
template <typename> class TaskFinisher;

template <typename ImageCtxT = ImageCtx>
class ImageWatcher : public Watcher {
public:
  ImageWatcher(ImageCtxT& image_ctx);
  ~ImageWatcher() override;

  void unregister_watch(Context *on_finish) override;
  void block_notifies(Context *on_finish) override;

  void notify_flatten(uint64_t request_id, ProgressContext &prog_ctx,
                      Context *on_finish);
  void notify_resize(uint64_t request_id, uint64_t size, bool allow_shrink,
                     ProgressContext &prog_ctx, Context *on_finish);
  void notify_snap_create(uint64_t request_id,
                          const cls::rbd::SnapshotNamespace &snap_namespace,
                          const std::string &snap_name,
                          uint64_t flags,
                          ProgressContext &prog_ctx,
                          Context *on_finish);
  void notify_snap_rename(uint64_t request_id,
                          const snapid_t &src_snap_id,
                          const std::string &dst_snap_name,
                          Context *on_finish);
  void notify_snap_remove(uint64_t request_id,
                          const cls::rbd::SnapshotNamespace &snap_namespace,
                          const std::string &snap_name,
                          Context *on_finish);
  void notify_snap_protect(uint64_t request_id,
                           const cls::rbd::SnapshotNamespace &snap_namespace,
                           const std::string &snap_name,
                           Context *on_finish);
  void notify_snap_unprotect(uint64_t request_id,
                             const cls::rbd::SnapshotNamespace &snap_namespace,
                             const std::string &snap_name,
                             Context *on_finish);
  void notify_rebuild_object_map(uint64_t request_id,
                                 ProgressContext &prog_ctx,
                                 Context *on_finish);
  void notify_rename(uint64_t request_id,
                     const std::string &image_name, Context *on_finish);

  void notify_update_features(uint64_t request_id,
                              uint64_t features, bool enabled,
                              Context *on_finish);

  void notify_migrate(uint64_t request_id, ProgressContext &prog_ctx,
                      Context *on_finish);
  void notify_sparsify(uint64_t request_id, size_t sparse_size,
                       ProgressContext &prog_ctx, Context *on_finish);

  void notify_acquired_lock();
  void notify_released_lock();
  void notify_request_lock();

  void notify_header_update(Context *on_finish);
  static void notify_header_update(librados::IoCtx &io_ctx,
                                   const std::string &oid);

  void notify_quiesce(uint64_t *request_id, ProgressContext &prog_ctx,
                      Context *on_finish);
  void notify_unquiesce(uint64_t request_id, Context *on_finish);

  void notify_metadata_set(uint64_t request_id,
                           const std::string &key, const std::string &value,
                           Context *on_finish);
  void notify_metadata_remove(uint64_t request_id,
                              const std::string &key, Context *on_finish);

private:
  enum TaskCode {
    TASK_CODE_REQUEST_LOCK,
    TASK_CODE_CANCEL_ASYNC_REQUESTS,
    TASK_CODE_REREGISTER_WATCH,
    TASK_CODE_ASYNC_REQUEST,
    TASK_CODE_ASYNC_PROGRESS,
    TASK_CODE_QUIESCE,
  };

  typedef std::pair<Context *, ProgressContext *> AsyncRequest;

  class Task {
  public:
    Task(TaskCode task_code) : m_task_code(task_code) {}
    Task(TaskCode task_code, const watch_notify::AsyncRequestId &id)
      : m_task_code(task_code), m_async_request_id(id) {}

    inline bool operator<(const Task& rhs) const {
      if (m_task_code != rhs.m_task_code) {
        return m_task_code < rhs.m_task_code;
      } else if ((m_task_code == TASK_CODE_ASYNC_REQUEST ||
                  m_task_code == TASK_CODE_ASYNC_PROGRESS ||
                  m_task_code == TASK_CODE_QUIESCE) &&
                 m_async_request_id != rhs.m_async_request_id) {
        return m_async_request_id < rhs.m_async_request_id;
      }
      return false;
    }
  private:
    TaskCode m_task_code;
    watch_notify::AsyncRequestId m_async_request_id;
  };

  class RemoteProgressContext : public ProgressContext {
  public:
    RemoteProgressContext(ImageWatcher &image_watcher,
                          const watch_notify::AsyncRequestId &id)
      : m_image_watcher(image_watcher), m_async_request_id(id)
    {
    }

    int update_progress(uint64_t offset, uint64_t total) override {
      m_image_watcher.schedule_async_progress(m_async_request_id, offset,
                                              total);
      return 0;
    }

  private:
    ImageWatcher &m_image_watcher;
    watch_notify::AsyncRequestId m_async_request_id;
  };

  class RemoteContext : public Context {
  public:
    RemoteContext(ImageWatcher &image_watcher,
                  const watch_notify::AsyncRequestId &id,
                  ProgressContext *prog_ctx)
      : m_image_watcher(image_watcher), m_async_request_id(id),
        m_prog_ctx(prog_ctx)
    {
    }

    ~RemoteContext() override {
      delete m_prog_ctx;
    }

    void finish(int r) override;

  private:
    ImageWatcher &m_image_watcher;
    watch_notify::AsyncRequestId m_async_request_id;
    ProgressContext *m_prog_ctx;
  };

  struct C_ProcessPayload;
  struct C_ResponseMessage : public Context {
    C_NotifyAck *notify_ack;

    C_ResponseMessage(C_NotifyAck *notify_ack) : notify_ack(notify_ack) {
    }
    void finish(int r) override;
  };

  ImageCtxT &m_image_ctx;

  TaskFinisher<Task> *m_task_finisher;

  ceph::shared_mutex m_async_request_lock;
  std::map<watch_notify::AsyncRequestId, AsyncRequest> m_async_requests;
  std::set<watch_notify::AsyncRequestId> m_async_pending;
  std::map<watch_notify::AsyncRequestId, int> m_async_complete;
  std::set<std::pair<utime_t, watch_notify::AsyncRequestId>>
      m_async_complete_expiration;

  ceph::mutex m_owner_client_id_lock;
  watch_notify::ClientId m_owner_client_id;

  AsyncOpTracker m_async_op_tracker;

  NoOpProgressContext m_no_op_prog_ctx;

  void handle_register_watch(int r);

  void schedule_cancel_async_requests();
  void cancel_async_requests();

  void set_owner_client_id(const watch_notify::ClientId &client_id);
  watch_notify::ClientId get_client_id();

  void handle_request_lock(int r);
  void schedule_request_lock(bool use_timer, int timer_delay = -1);

  void notify_lock_owner(watch_notify::Payload *payload, Context *on_finish);

  bool is_new_request(const watch_notify::AsyncRequestId &id) const;
  bool mark_async_request_complete(const watch_notify::AsyncRequestId &id,
                                   int r);
  Context *remove_async_request(const watch_notify::AsyncRequestId &id);
  Context *remove_async_request(const watch_notify::AsyncRequestId &id,
                                ceph::shared_mutex &lock);
  void schedule_async_request_timed_out(
      const watch_notify::AsyncRequestId &id);
  void async_request_timed_out(const watch_notify::AsyncRequestId &id);
  void notify_async_request(const watch_notify::AsyncRequestId &id,
                            watch_notify::Payload *payload,
                            ProgressContext& prog_ctx,
                            Context *on_finish);

  void schedule_async_progress(const watch_notify::AsyncRequestId &id,
                               uint64_t offset, uint64_t total);
  int notify_async_progress(const watch_notify::AsyncRequestId &id,
                            uint64_t offset, uint64_t total);
  void schedule_async_complete(const watch_notify::AsyncRequestId &id, int r);
  void notify_async_complete(const watch_notify::AsyncRequestId &id, int r);
  void handle_async_complete(const watch_notify::AsyncRequestId &request,
                             int r, int ret_val);

  int prepare_async_request(const watch_notify::AsyncRequestId& id,
                            bool* new_request, Context** ctx,
                            ProgressContext** prog_ctx);

  Context *prepare_quiesce_request(
      const watch_notify::AsyncRequestId &request, C_NotifyAck *ack_ctx);
  void prepare_unquiesce_request(const watch_notify::AsyncRequestId &request);
  void cancel_quiesce_requests();

  void notify_quiesce(const watch_notify::AsyncRequestId &async_request_id,
                      size_t attempt, size_t total_attempts,
                      ProgressContext &prog_ctx, Context *on_finish);

  bool handle_operation_request(
      const watch_notify::AsyncRequestId& async_request_id,
      exclusive_lock::OperationRequestType request_type, Operation operation,
      std::function<void(ProgressContext &prog_ctx, Context*)> execute,
      C_NotifyAck *ack_ctx);

  bool handle_payload(const watch_notify::HeaderUpdatePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::AcquiredLockPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::ReleasedLockPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::RequestLockPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::AsyncProgressPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::AsyncCompletePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::FlattenPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::ResizePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::SnapCreatePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::SnapRenamePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::SnapRemovePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::SnapProtectPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::SnapUnprotectPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::RebuildObjectMapPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::RenamePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::UpdateFeaturesPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::MigratePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::SparsifyPayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::QuiescePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::UnquiescePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::MetadataUpdatePayload& payload,
                      C_NotifyAck *ctx);
  bool handle_payload(const watch_notify::UnknownPayload& payload,
                      C_NotifyAck *ctx);
  void process_payload(uint64_t notify_id, uint64_t handle,
                       watch_notify::Payload *payload);

  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist &bl) override;
  void handle_error(uint64_t cookie, int err) override;
  void handle_rewatch_complete(int r) override;

  void send_notify(watch_notify::Payload *payload, Context *ctx = nullptr);
};

} // namespace librbd

extern template class librbd::ImageWatcher<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IMAGE_WATCHER_H
12,304
38.187898
80
h
null
ceph-main/src/librbd/Journal.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_JOURNAL_H
#define CEPH_LIBRBD_JOURNAL_H

#include "include/int_types.h"
#include "include/Context.h"
#include "include/interval_set.h"
#include "include/rados/librados_fwd.hpp"
#include "common/AsyncOpTracker.h"
#include "common/Cond.h"
#include "common/Timer.h"
#include "common/RefCountedObj.h"
#include "journal/Future.h"
#include "journal/JournalMetadataListener.h"
#include "journal/ReplayEntry.h"
#include "journal/ReplayHandler.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"

#include <algorithm>
#include <list>
#include <string>
#include <atomic>
#include <unordered_map>

class ContextWQ;

namespace journal {
class Journaler;
}

namespace librbd {

class ImageCtx;

namespace journal { template <typename> class Replay; }

template <typename ImageCtxT = ImageCtx>
class Journal : public RefCountedObject {
public:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * UNINITIALIZED ---> INITIALIZING ---> REPLAYING ------> FLUSHING ---> READY
   *    |                 *  .  ^             *  .              *           |
   *    |                 *  .  |             *  .              *           |
   *    |                 *  .  |   (error)   *  . . . . . . .  *           |
   *    |                 *  .  |             *              .  *           |
   *    |                 *  .  |             v              .  *           |
   *    |                 *  .  |     FLUSHING_RESTART       .  *           |
   *    |                 *  .  |             |              .  *           |
   *    |                 *  .  |             |              .  *           |
   *    |                 *  .  |             v              .  *           v
   *    |                 *  .  |     RESTARTING < * * * * * *          STOPPING
   *    |                 *  .  |             |              .             |
   *    |                 *  .  |             |              .             |
   *    |                 * * * * * * .       \-------------/             |
   *    |                 * (error)   .                     .             |
   *    |                 *   . . . . . . . . . . . . . . . .             |
   *    |                 *   .                             .             |
   *    |                 v   v                             v             |
   *    |               CLOSED <----- CLOSING <---------------------------/
   *    |                 |
   *    |                 v
   *    \---> <finish>
   *
   * @endverbatim
   */
  enum State {
    STATE_UNINITIALIZED,
    STATE_INITIALIZING,
    STATE_REPLAYING,
    STATE_FLUSHING_RESTART,
    STATE_RESTARTING_REPLAY,
    STATE_FLUSHING_REPLAY,
    STATE_READY,
    STATE_STOPPING,
    STATE_CLOSING,
    STATE_CLOSED
  };

  static const std::string IMAGE_CLIENT_ID;
  static const std::string LOCAL_MIRROR_UUID;
  static const std::string ORPHAN_MIRROR_UUID;

  Journal(ImageCtxT &image_ctx);
  ~Journal();

  static void get_work_queue(CephContext *cct, ContextWQ **work_queue);

  static bool is_journal_supported(ImageCtxT &image_ctx);
  static int create(librados::IoCtx &io_ctx, const std::string &image_id,
                    uint8_t order, uint8_t splay_width,
                    const std::string &object_pool);
  static int remove(librados::IoCtx &io_ctx, const std::string &image_id);
  static int reset(librados::IoCtx &io_ctx, const std::string &image_id);

  static void is_tag_owner(ImageCtxT *image_ctx, bool *is_tag_owner,
                           Context *on_finish);
  static void is_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
                           bool *is_tag_owner, asio::ContextWQ *op_work_queue,
                           Context *on_finish);
  static void get_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
                            std::string *mirror_uuid,
                            asio::ContextWQ *op_work_queue,
                            Context *on_finish);
  static int request_resync(ImageCtxT *image_ctx);
  static void promote(ImageCtxT *image_ctx, Context *on_finish);
  static void demote(ImageCtxT *image_ctx, Context *on_finish);

  bool is_journal_ready() const;
  bool is_journal_replaying() const;
  bool is_journal_appending() const;

  void wait_for_journal_ready(Context *on_ready);

  void open(Context *on_finish);
  void close(Context *on_finish);

  bool is_tag_owner() const;
  uint64_t get_tag_tid() const;
  journal::TagData get_tag_data() const;

  void allocate_local_tag(Context *on_finish);
  void allocate_tag(const std::string &mirror_uuid,
                    const journal::TagPredecessor &predecessor,
                    Context *on_finish);

  void flush_commit_position(Context *on_finish);

  void user_flushed();

  uint64_t append_write_event(uint64_t offset, size_t length,
                              const bufferlist &bl,
                              bool flush_entry);
  uint64_t append_compare_and_write_event(uint64_t offset,
                                          size_t length,
                                          const bufferlist &cmp_bl,
                                          const bufferlist &write_bl,
                                          bool flush_entry);
  uint64_t append_io_event(journal::EventEntry &&event_entry,
                           uint64_t offset, size_t length,
                           bool flush_entry, int filter_ret_val);
  void commit_io_event(uint64_t tid, int r);
  void commit_io_event_extent(uint64_t tid, uint64_t offset, uint64_t length,
                              int r);

  void append_op_event(uint64_t op_tid, journal::EventEntry &&event_entry,
                       Context *on_safe);
  void commit_op_event(uint64_t tid, int r, Context *on_safe);
  void replay_op_ready(uint64_t op_tid, Context *on_resume);

  void flush_event(uint64_t tid, Context *on_safe);
  void wait_event(uint64_t tid, Context *on_safe);

  uint64_t allocate_op_tid() {
    uint64_t op_tid = ++m_op_tid;
    ceph_assert(op_tid != 0);
    return op_tid;
  }

  void start_external_replay(journal::Replay<ImageCtxT> **journal_replay,
                             Context *on_start);
  void stop_external_replay();

  void add_listener(journal::Listener *listener);
  void remove_listener(journal::Listener *listener);

  int is_resync_requested(bool *do_resync);

  inline ContextWQ *get_work_queue() {
    return m_work_queue;
  }

private:
  ImageCtxT &m_image_ctx;

  // mock unit testing support
  typedef journal::TypeTraits<ImageCtxT> TypeTraits;
  typedef typename TypeTraits::Journaler Journaler;
  typedef typename TypeTraits::Future Future;
  typedef typename TypeTraits::ReplayEntry ReplayEntry;

  typedef std::list<bufferlist> Bufferlists;
  typedef std::list<Context *> Contexts;
  typedef std::list<Future> Futures;
  typedef interval_set<uint64_t> ExtentInterval;

  struct Event {
    Futures futures;
    Contexts on_safe_contexts;
    ExtentInterval pending_extents;
    int filter_ret_val = 0;
    bool committed_io = false;
    bool safe = false;
    int ret_val = 0;

    Event() {
    }
    Event(const Futures &_futures, uint64_t offset, size_t length,
          int filter_ret_val)
      : futures(_futures), filter_ret_val(filter_ret_val) {
      if (length > 0) {
        pending_extents.insert(offset, length);
      }
    }
  };

  typedef std::unordered_map<uint64_t, Event> Events;
  typedef std::unordered_map<uint64_t, Future> TidToFutures;

  struct C_IOEventSafe : public Context {
    Journal *journal;
    uint64_t tid;

    C_IOEventSafe(Journal *_journal, uint64_t _tid)
      : journal(_journal), tid(_tid) {
    }

    void finish(int r) override {
      journal->handle_io_event_safe(r, tid);
    }
  };

  struct C_OpEventSafe : public Context {
    Journal *journal;
    uint64_t tid;
    Future op_start_future;
    Future op_finish_future;
    Context *on_safe;

    C_OpEventSafe(Journal *journal, uint64_t tid,
                  const Future &op_start_future,
                  const Future &op_finish_future, Context *on_safe)
      : journal(journal), tid(tid), op_start_future(op_start_future),
        op_finish_future(op_finish_future), on_safe(on_safe) {
    }

    void finish(int r) override {
      journal->handle_op_event_safe(r, tid, op_start_future, op_finish_future,
                                    on_safe);
    }
  };

  struct C_ReplayProcessSafe : public Context {
    Journal *journal;
    ReplayEntry replay_entry;

    C_ReplayProcessSafe(Journal *journal, ReplayEntry &&replay_entry)
      : journal(journal), replay_entry(std::move(replay_entry)) {
    }
    void finish(int r) override {
      journal->handle_replay_process_safe(replay_entry, r);
    }
  };

  struct ReplayHandler : public ::journal::ReplayHandler {
    Journal *journal;

    ReplayHandler(Journal *_journal) : journal(_journal) {
    }

    void handle_entries_available() override {
      journal->handle_replay_ready();
    }
    void handle_complete(int r) override {
      journal->handle_replay_complete(r);
    }
  };

  ContextWQ *m_work_queue = nullptr;
  SafeTimer *m_timer = nullptr;
  ceph::mutex *m_timer_lock = nullptr;

  Journaler *m_journaler;
  mutable ceph::mutex m_lock = ceph::make_mutex("Journal<I>::m_lock");
  State m_state;
  uint64_t m_max_append_size = 0;
  uint64_t m_tag_class = 0;
  uint64_t m_tag_tid = 0;
  journal::ImageClientMeta m_client_meta;
  journal::TagData m_tag_data;

  int m_error_result;
  Contexts m_wait_for_state_contexts;

  ReplayHandler m_replay_handler;
  bool m_close_pending;

  ceph::mutex m_event_lock = ceph::make_mutex("Journal<I>::m_event_lock");
  uint64_t m_event_tid;
  Events m_events;

  std::atomic<bool> m_user_flushed = false;

  std::atomic<uint64_t> m_op_tid = { 0 };
  TidToFutures m_op_futures;

  bool m_processing_entry = false;
  bool m_blocking_writes;

  journal::Replay<ImageCtxT> *m_journal_replay;

  AsyncOpTracker m_async_journal_op_tracker;

  struct MetadataListener : public ::journal::JournalMetadataListener {
    Journal<ImageCtxT> *journal;

    MetadataListener(Journal<ImageCtxT> *journal) : journal(journal) {
    }

    void handle_update(::journal::JournalMetadata *) override;
  } m_metadata_listener;

  typedef std::set<journal::Listener *> Listeners;
  Listeners m_listeners;
  ceph::condition_variable m_listener_cond;
  bool m_listener_notify = false;

  uint64_t m_refresh_sequence = 0;

  bool is_journal_replaying(const ceph::mutex &) const;
  bool is_tag_owner(const ceph::mutex &) const;

  uint64_t append_io_events(journal::EventType event_type,
                            const Bufferlists &bufferlists,
                            uint64_t offset, size_t length, bool flush_entry,
                            int filter_ret_val);
  Future wait_event(ceph::mutex &lock, uint64_t tid, Context *on_safe);

  void create_journaler();
  void destroy_journaler(int r);
  void recreate_journaler(int r);

  void complete_event(typename Events::iterator it, int r);

  void start_append();

  void handle_open(int r);

  void handle_replay_ready();
  void handle_replay_complete(int r);
  void handle_replay_process_ready(int r);
  void handle_replay_process_safe(ReplayEntry replay_entry, int r);

  void handle_start_external_replay(int r,
                                    journal::Replay<ImageCtxT> **journal_replay,
                                    Context *on_finish);

  void handle_flushing_restart(int r);
  void handle_flushing_replay();

  void handle_recording_stopped(int r);

  void handle_journal_destroyed(int r);

  void handle_io_event_safe(int r, uint64_t tid);
  void handle_op_event_safe(int r, uint64_t tid,
                            const Future &op_start_future,
                            const Future &op_finish_future, Context *on_safe);

  void stop_recording();

  void transition_state(State state, int r);

  bool is_steady_state() const;
  void wait_for_steady_state(Context *on_state);

  int check_resync_requested(bool *do_resync);

  void handle_metadata_updated();
  void handle_refresh_metadata(uint64_t refresh_sequence, uint64_t tag_tid,
                               journal::TagData tag_data, int r);
};

} // namespace librbd

extern template class librbd::Journal<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_JOURNAL_H
12,384
31.506562
80
h
null
ceph-main/src/librbd/LibrbdAdminSocketHook.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_LIBRBDADMINSOCKETHOOK_H
#define CEPH_LIBRBD_LIBRBDADMINSOCKETHOOK_H

#include <map>

#include "common/admin_socket.h"

namespace librbd {

struct ImageCtx;
class LibrbdAdminSocketCommand;

class LibrbdAdminSocketHook : public AdminSocketHook {
public:
  LibrbdAdminSocketHook(ImageCtx *ictx);
  ~LibrbdAdminSocketHook() override;

  int call(std::string_view command, const cmdmap_t& cmdmap,
           const bufferlist&,
           Formatter *f,
           std::ostream& errss,
           bufferlist& out) override;

private:
  typedef std::map<std::string,LibrbdAdminSocketCommand*,
                   std::less<>> Commands;

  AdminSocket *admin_socket;
  Commands commands;
};
}

#endif
801
21.277778
70
h
null
ceph-main/src/librbd/ManagedLock.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_MANAGED_LOCK_H
#define CEPH_LIBRBD_MANAGED_LOCK_H

#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "cls/lock/cls_lock_types.h"
#include "librbd/watcher/Types.h"
#include "librbd/managed_lock/Types.h"
#include <list>
#include <string>
#include <utility>

namespace librbd {

struct AsioEngine;
struct ImageCtx;

namespace asio { struct ContextWQ; }
namespace managed_lock { struct Locker; }

template <typename ImageCtxT = librbd::ImageCtx>
class ManagedLock {
private:
  typedef watcher::Traits<ImageCtxT> TypeTraits;
  typedef typename TypeTraits::Watcher Watcher;

public:
  static ManagedLock *create(librados::IoCtx& ioctx,
                             AsioEngine& asio_engine,
                             const std::string& oid, Watcher *watcher,
                             managed_lock::Mode mode,
                             bool blocklist_on_break_lock,
                             uint32_t blocklist_expire_seconds) {
    return new ManagedLock(ioctx, asio_engine, oid, watcher, mode,
                           blocklist_on_break_lock, blocklist_expire_seconds);
  }
  void destroy() {
    delete this;
  }

  ManagedLock(librados::IoCtx& ioctx, AsioEngine& asio_engine,
              const std::string& oid, Watcher *watcher,
              managed_lock::Mode mode, bool blocklist_on_break_lock,
              uint32_t blocklist_expire_seconds);
  virtual ~ManagedLock();

  bool is_lock_owner() const;

  void shut_down(Context *on_shutdown);
  void acquire_lock(Context *on_acquired);
  void try_acquire_lock(Context *on_acquired);
  void release_lock(Context *on_released);
  void reacquire_lock(Context *on_reacquired);
  void get_locker(managed_lock::Locker *locker, Context *on_finish);
  void break_lock(const managed_lock::Locker &locker, bool force_break_lock,
                  Context *on_finish);

  int assert_header_locked();

  bool is_shutdown() const {
    std::lock_guard l{m_lock};
    return is_state_shutdown();
  }

protected:
  mutable ceph::mutex m_lock;

  inline void set_state_uninitialized() {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    ceph_assert(m_state == STATE_UNLOCKED);
    m_state = STATE_UNINITIALIZED;
  }
  inline void set_state_initializing() {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    ceph_assert(m_state == STATE_UNINITIALIZED);
    m_state = STATE_INITIALIZING;
  }
  inline void set_state_unlocked() {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING);
    m_state = STATE_UNLOCKED;
  }
  inline void set_state_waiting_for_lock() {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    ceph_assert(m_state == STATE_ACQUIRING);
    m_state = STATE_WAITING_FOR_LOCK;
  }
  inline void set_state_post_acquiring() {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    ceph_assert(m_state == STATE_ACQUIRING);
    m_state = STATE_POST_ACQUIRING;
  }

  bool is_state_shutdown() const;
  inline bool is_state_acquiring() const {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    return m_state == STATE_ACQUIRING;
  }
  inline bool is_state_post_acquiring() const {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    return m_state == STATE_POST_ACQUIRING;
  }
  inline bool is_state_releasing() const {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    return m_state == STATE_RELEASING;
  }
  inline bool is_state_pre_releasing() const {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    return m_state == STATE_PRE_RELEASING;
  }
  inline bool is_state_locked() const {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    return m_state == STATE_LOCKED;
  }
  inline bool is_state_waiting_for_lock() const {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    return m_state == STATE_WAITING_FOR_LOCK;
  }

  inline bool is_action_acquire_lock() const {
    ceph_assert(ceph_mutex_is_locked(m_lock));
    return get_active_action() == ACTION_ACQUIRE_LOCK;
  }

  virtual void shutdown_handler(int r, Context *on_finish);
  virtual void pre_acquire_lock_handler(Context *on_finish);
  virtual void post_acquire_lock_handler(int r, Context *on_finish);
  virtual void pre_release_lock_handler(bool shutting_down,
                                        Context *on_finish);
  virtual void post_release_lock_handler(bool shutting_down, int r,
                                         Context *on_finish);
  virtual void post_reacquire_lock_handler(int r, Context *on_finish);

  void execute_next_action();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |
   *    v           (acquire_lock)
   * UNLOCKED -----------------------------------------> ACQUIRING
   *    ^                                                    |
   *    |                                                    |
   * RELEASING                                               |
   *    |                                                    |
   *    |                                                    |
   *    |          (release_lock)                            v
   * PRE_RELEASING <----------------------------------------- LOCKED
   *
   * <LOCKED state>
   *    |
   *    v
   * REACQUIRING -------------------------------------> <finish>
   *    .                                                 ^
   *    .                                                 |
   *    . . . > <RELEASE action> ---> <ACQUIRE action> ---/
   *
   * <UNLOCKED/LOCKED states>
   *    |
   *    |
   *    v
   * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
   *
   * @endverbatim
   */
  enum State {
    STATE_UNINITIALIZED,
    STATE_INITIALIZING,
    STATE_UNLOCKED,
    STATE_LOCKED,
    STATE_ACQUIRING,
    STATE_POST_ACQUIRING,
    STATE_WAITING_FOR_REGISTER,
    STATE_WAITING_FOR_LOCK,
    STATE_REACQUIRING,
    STATE_PRE_RELEASING,
    STATE_RELEASING,
    STATE_PRE_SHUTTING_DOWN,
    STATE_SHUTTING_DOWN,
    STATE_SHUTDOWN,
  };

  enum Action {
    ACTION_TRY_LOCK,
    ACTION_ACQUIRE_LOCK,
    ACTION_REACQUIRE_LOCK,
    ACTION_RELEASE_LOCK,
    ACTION_SHUT_DOWN
  };

  typedef std::list<Context *> Contexts;
  typedef std::pair<Action, Contexts> ActionContexts;
  typedef std::list<ActionContexts> ActionsContexts;

  struct C_ShutDownRelease : public Context {
    ManagedLock *lock;
    C_ShutDownRelease(ManagedLock *lock)
      : lock(lock) {
    }
    void finish(int r) override {
      lock->send_shutdown_release();
    }
  };

  librados::IoCtx& m_ioctx;
  CephContext *m_cct;
  AsioEngine& m_asio_engine;
  asio::ContextWQ* m_work_queue;
  std::string m_oid;
  Watcher *m_watcher;
  managed_lock::Mode m_mode;
  bool m_blocklist_on_break_lock;
  uint32_t m_blocklist_expire_seconds;

  std::string m_cookie;
  std::string m_new_cookie;

  State m_state;
  State m_post_next_state;

  ActionsContexts m_actions_contexts;
  AsyncOpTracker m_async_op_tracker;

  bool is_lock_owner(ceph::mutex &lock) const;
  bool is_transition_state() const;

  void append_context(Action action, Context *ctx);
  void execute_action(Action action, Context *ctx);

  Action get_active_action() const;
  void complete_active_action(State next_state, int r);

  void send_acquire_lock();
  void handle_pre_acquire_lock(int r);
  void handle_acquire_lock(int r);
  void handle_no_op_reacquire_lock(int r);

  void handle_post_acquire_lock(int r);
  void revert_to_unlock_state(int r);

  void send_reacquire_lock();
  void handle_reacquire_lock(int r);
  void release_acquire_lock();

  void send_release_lock();
  void handle_pre_release_lock(int r);
  void handle_release_lock(int r);
  void handle_post_release_lock(int r);

  void send_shutdown();
  void handle_shutdown(int r);
  void send_shutdown_release();
  void handle_shutdown_pre_release(int r);
  void handle_shutdown_post_release(int r);
  void wait_for_tracked_ops(int r);
  void complete_shutdown(int r);
};

} // namespace librbd

extern template class librbd::ManagedLock<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_MANAGED_LOCK_H
8,221
29.339483
78
h
null
ceph-main/src/librbd/MirroringWatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_MIRRORING_WATCHER_H
#define CEPH_LIBRBD_MIRRORING_WATCHER_H

#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/mirroring_watcher/Types.h"

namespace librbd {

namespace asio { struct ContextWQ; }
namespace watcher {
namespace util {
template <typename> struct HandlePayloadVisitor;
}
}

template <typename ImageCtxT = librbd::ImageCtx>
class MirroringWatcher : public Watcher {
  friend struct watcher::util::HandlePayloadVisitor<MirroringWatcher<ImageCtxT>>;

public:
  MirroringWatcher(librados::IoCtx &io_ctx, asio::ContextWQ *work_queue);

  static int notify_mode_updated(librados::IoCtx &io_ctx,
                                 cls::rbd::MirrorMode mirror_mode);
  static void notify_mode_updated(librados::IoCtx &io_ctx,
                                  cls::rbd::MirrorMode mirror_mode,
                                  Context *on_finish);

  static int notify_image_updated(librados::IoCtx &io_ctx,
                                  cls::rbd::MirrorImageState mirror_image_state,
                                  const std::string &image_id,
                                  const std::string &global_image_id);
  static void notify_image_updated(librados::IoCtx &io_ctx,
                                   cls::rbd::MirrorImageState mirror_image_state,
                                   const std::string &image_id,
                                   const std::string &global_image_id,
                                   Context *on_finish);

  virtual void handle_mode_updated(cls::rbd::MirrorMode mirror_mode) = 0;
  virtual void handle_image_updated(cls::rbd::MirrorImageState state,
                                    const std::string &image_id,
                                    const std::string &global_image_id) = 0;

private:
  bool handle_payload(const mirroring_watcher::ModeUpdatedPayload &payload,
                      Context *on_notify_ack);
  bool handle_payload(const mirroring_watcher::ImageUpdatedPayload &payload,
                      Context *on_notify_ack);
  bool handle_payload(const mirroring_watcher::UnknownPayload &payload,
                      Context *on_notify_ack);

  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist &bl) override;
};

} // namespace librbd

extern template class librbd::MirroringWatcher<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_MIRRORING_WATCHER_H
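// --- Editorial usage sketch (not part of the upstream header): broadcasting
// a pool mirror-mode change via the static helper; MIRROR_MODE_POOL is the
// enumerator assumed from cls/rbd/cls_rbd_types.h.
inline int mirroring_notify_example(librados::IoCtx& io_ctx) {
  return librbd::MirroringWatcher<>::notify_mode_updated(
    io_ctx, cls::rbd::MIRROR_MODE_POOL);
}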
2,638
37.808824
81
h
null
ceph-main/src/librbd/ObjectMap.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_OBJECT_MAP_H
#define CEPH_LIBRBD_OBJECT_MAP_H

#include "include/int_types.h"
#include "include/fs_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/object_map_types.h"
#include "common/AsyncOpTracker.h"
#include "common/bit_vector.hpp"
#include "common/RefCountedObj.h"
#include "librbd/Utils.h"
#include <boost/optional.hpp>

class Context;
namespace ZTracer { struct Trace; }

namespace librbd {

template <typename Op> class BlockGuard;
struct BlockGuardCell;
class ImageCtx;

template <typename ImageCtxT = ImageCtx>
class ObjectMap : public RefCountedObject {
public:
  static ObjectMap *create(ImageCtxT &image_ctx, uint64_t snap_id) {
    return new ObjectMap(image_ctx, snap_id);
  }

  ObjectMap(ImageCtxT &image_ctx, uint64_t snap_id);
  ~ObjectMap();

  static int aio_remove(librados::IoCtx &io_ctx, const std::string &image_id,
                        librados::AioCompletion *c);
  static std::string object_map_name(const std::string &image_id,
                                     uint64_t snap_id);

  static bool is_compatible(const file_layout_t& layout, uint64_t size);

  uint8_t operator[](uint64_t object_no) const;

  inline uint64_t size() const {
    std::shared_lock locker{m_lock};
    return m_object_map.size();
  }

  inline void set_state(uint64_t object_no, uint8_t new_state,
                        const boost::optional<uint8_t> &current_state) {
    std::unique_lock locker{m_lock};
    ceph_assert(object_no < m_object_map.size());
    if (current_state && m_object_map[object_no] != *current_state) {
      return;
    }
    m_object_map[object_no] = new_state;
  }

  void open(Context *on_finish);
  void close(Context *on_finish);
  bool set_object_map(ceph::BitVector<2> &target_object_map);
  bool object_may_exist(uint64_t object_no) const;
  bool object_may_not_exist(uint64_t object_no) const;

  void aio_save(Context *on_finish);
  void aio_resize(uint64_t new_size, uint8_t default_object_state,
                  Context *on_finish);

  template <typename T, void(T::*MF)(int) = &T::complete>
  bool aio_update(uint64_t snap_id, uint64_t start_object_no,
                  uint8_t new_state,
                  const boost::optional<uint8_t> &current_state,
                  const ZTracer::Trace &parent_trace, bool ignore_enoent,
                  T *callback_object) {
    return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
                             new_state, current_state, parent_trace,
                             ignore_enoent, callback_object);
  }

  template <typename T, void(T::*MF)(int) = &T::complete>
  bool aio_update(uint64_t snap_id, uint64_t start_object_no,
                  uint64_t end_object_no, uint8_t new_state,
                  const boost::optional<uint8_t> &current_state,
                  const ZTracer::Trace &parent_trace, bool ignore_enoent,
                  T *callback_object) {
    ceph_assert(start_object_no < end_object_no);
    std::unique_lock locker{m_lock};

    if (snap_id == CEPH_NOSNAP) {
      end_object_no = std::min(end_object_no, m_object_map.size());
      if (start_object_no >= end_object_no) {
        return false;
      }

      auto it = m_object_map.begin() + start_object_no;
      auto end_it = m_object_map.begin() + end_object_no;
      for (; it != end_it; ++it) {
        if (update_required(it, new_state)) {
          break;
        }
      }

      if (it == end_it) {
        return false;
      }

      m_async_op_tracker.start_op();
      UpdateOperation update_operation(
        start_object_no, end_object_no, new_state, current_state,
        parent_trace, ignore_enoent,
        util::create_context_callback<T, MF>(callback_object));
      detained_aio_update(std::move(update_operation));
    } else {
      aio_update(snap_id, start_object_no, end_object_no, new_state,
                 current_state, parent_trace, ignore_enoent,
                 util::create_context_callback<T, MF>(callback_object));
    }
    return true;
  }

  void rollback(uint64_t snap_id, Context *on_finish);
  void snapshot_add(uint64_t snap_id, Context *on_finish);
  void snapshot_remove(uint64_t snap_id, Context *on_finish);

private:
  struct UpdateOperation {
    uint64_t start_object_no;
    uint64_t end_object_no;
    uint8_t new_state;
    boost::optional<uint8_t> current_state;
    ZTracer::Trace parent_trace;
    bool ignore_enoent;
    Context *on_finish;

    UpdateOperation(uint64_t start_object_no, uint64_t end_object_no,
                    uint8_t new_state,
                    const boost::optional<uint8_t> &current_state,
                    const ZTracer::Trace &parent_trace, bool ignore_enoent,
                    Context *on_finish)
      : start_object_no(start_object_no), end_object_no(end_object_no),
        new_state(new_state), current_state(current_state),
        parent_trace(parent_trace), ignore_enoent(ignore_enoent),
        on_finish(on_finish) {
    }
  };

  typedef BlockGuard<UpdateOperation> UpdateGuard;

  ImageCtxT &m_image_ctx;
  uint64_t m_snap_id;

  mutable ceph::shared_mutex m_lock;
  ceph::BitVector<2> m_object_map;

  AsyncOpTracker m_async_op_tracker;
  UpdateGuard *m_update_guard = nullptr;

  void detained_aio_update(UpdateOperation &&update_operation);
  void handle_detained_aio_update(BlockGuardCell *cell, int r,
                                  Context *on_finish);

  void aio_update(uint64_t snap_id, uint64_t start_object_no,
                  uint64_t end_object_no, uint8_t new_state,
                  const boost::optional<uint8_t> &current_state,
                  const ZTracer::Trace &parent_trace, bool ignore_enoent,
                  Context *on_finish);
  bool update_required(const ceph::BitVector<2>::Iterator &it,
                       uint8_t new_state);
};

} // namespace librbd

extern template class librbd::ObjectMap<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_OBJECT_MAP_H
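// --- Editorial usage sketch (not part of the upstream header): marking one
// object as existing in the HEAD object map. Assumes `om` was open()ed;
// OBJECT_EXISTS comes from include/rbd/object_map_types.h and constructing
// the trace object requires common/zipkin_trace.h.
struct C_UpdateDone : public Context {
  void finish(int r) override {
    // r == 0 on success
  }
};

inline void object_map_example(librbd::ObjectMap<>& om, uint64_t object_no) {
  Context* on_finish = new C_UpdateDone();
  ZTracer::Trace no_trace;
  if (!om.aio_update<Context>(CEPH_NOSNAP, object_no, OBJECT_EXISTS,
                              boost::none, no_trace,
                              false /* ignore_enoent */, on_finish)) {
    delete on_finish;  // no update was required; the callback never fires
  }
}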
6,104
33.6875
106
h
null
ceph-main/src/librbd/Operations.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_OPERATIONS_H
#define CEPH_LIBRBD_OPERATIONS_H

#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/operation/ObjectMapIterate.h"
#include <atomic>
#include <string>
#include <list>
#include <map>
#include <set>
#include <boost/function.hpp>

class Context;

namespace librbd {

class ImageCtx;
class ProgressContext;

enum Operation {
  OPERATION_CHECK_OBJECT_MAP,
  OPERATION_FLATTEN,
  OPERATION_METADATA_UPDATE,
  OPERATION_MIGRATE,
  OPERATION_REBUILD_OBJECT_MAP,
  OPERATION_RENAME,
  OPERATION_RESIZE,
  OPERATION_SNAP_CREATE,
  OPERATION_SNAP_PROTECT,
  OPERATION_SNAP_REMOVE,
  OPERATION_SNAP_RENAME,
  OPERATION_SNAP_ROLLBACK,
  OPERATION_SNAP_UNPROTECT,
  OPERATION_SPARSIFY,
  OPERATION_UPDATE_FEATURES,
};

template <typename ImageCtxT = ImageCtx>
class Operations {
public:
  Operations(ImageCtxT &image_ctx);

  void start_op(enum Operation op, Context *ctx);
  void finish_op(enum Operation op, int r);

  int flatten(ProgressContext &prog_ctx);
  void execute_flatten(ProgressContext &prog_ctx, Context *on_finish);

  int rebuild_object_map(ProgressContext &prog_ctx);
  void execute_rebuild_object_map(ProgressContext &prog_ctx,
                                  Context *on_finish);

  int check_object_map(ProgressContext &prog_ctx);
  void check_object_map(ProgressContext &prog_ctx, Context *on_finish);
  void object_map_iterate(ProgressContext &prog_ctx,
                          operation::ObjectIterateWork<ImageCtxT> handle_mismatch,
                          Context* on_finish);

  int rename(const char *dstname);
  void execute_rename(const std::string &dest_name, Context *on_finish);

  int resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx);
  void execute_resize(uint64_t size, bool allow_shrink,
                      ProgressContext &prog_ctx, Context *on_finish,
                      uint64_t journal_op_tid);

  int snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
                  const std::string& snap_name, uint64_t flags,
                  ProgressContext& prog_ctx);
  void snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
                   const std::string& snap_name, uint64_t flags,
                   ProgressContext& prog_ctx, Context *on_finish);
  void execute_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
                           const std::string &snap_name, Context *on_finish,
                           uint64_t journal_op_tid, uint64_t flags,
                           ProgressContext &prog_ctx);

  int snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
                    const std::string& snap_name,
                    ProgressContext& prog_ctx);
  void execute_snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
                             const std::string &snap_name,
                             ProgressContext& prog_ctx, Context *on_finish);

  int snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
                  const std::string& snap_name);
  void snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
                   const std::string& snap_name, Context *on_finish);
  void execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
                           const std::string &snap_name, Context *on_finish);

  int snap_rename(const char *srcname, const char *dstname);
  void execute_snap_rename(const uint64_t src_snap_id,
                           const std::string &dest_snap_name,
                           Context *on_finish);

  int snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
                   const std::string& snap_name);
  void execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
                            const std::string &snap_name, Context *on_finish);

  int snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
                     const std::string& snap_name);
  void execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
                              const std::string &snap_name,
                              Context *on_finish);

  int snap_set_limit(uint64_t limit);
  void execute_snap_set_limit(uint64_t limit, Context *on_finish);

  int update_features(uint64_t features, bool enabled);
  void execute_update_features(uint64_t features, bool enabled,
                               Context *on_finish, uint64_t journal_op_tid);

  int metadata_set(const std::string &key, const std::string &value);
  void execute_metadata_set(const std::string &key, const std::string &value,
                            Context *on_finish);

  int metadata_remove(const std::string &key);
  void execute_metadata_remove(const std::string &key, Context *on_finish);

  int migrate(ProgressContext &prog_ctx);
  void execute_migrate(ProgressContext &prog_ctx, Context *on_finish);

  int sparsify(size_t sparse_size, ProgressContext &prog_ctx);
  void execute_sparsify(size_t sparse_size, ProgressContext &prog_ctx,
                        Context *on_finish);

  int prepare_image_update(exclusive_lock::OperationRequestType request_type,
                           bool request_lock);

private:
  ImageCtxT &m_image_ctx;

  mutable ceph::mutex m_queue_lock;
  std::set<Operation> m_in_flight_ops;
  std::map<Operation, std::list<Context *>> m_queued_ops;

  int invoke_async_request(Operation op,
                           exclusive_lock::OperationRequestType request_type,
                           bool permit_snapshot,
                           const boost::function<void(Context*)>& local,
                           const boost::function<void(Context*)>& remote);
};

} // namespace librbd

extern template class librbd::Operations<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_OPERATIONS_H
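// --- Editorial usage sketch (not part of the upstream header): a blocking
// resize through the Operations facade, assuming `ops` belongs to an open
// image (ImageCtx wires one up as its `operations` member) and that
// librbd::ProgressContext is the abstract interface from librbd.hpp.
struct NoProgress : public librbd::ProgressContext {
  int update_progress(uint64_t offset, uint64_t total) override {
    return 0;  // a non-zero return requests cancellation
  }
};

inline int operations_resize_example(librbd::Operations<>& ops,
                                     uint64_t new_size) {
  NoProgress prog_ctx;
  return ops.resize(new_size, true /* allow_shrink */, prog_ctx);
}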
5,716
34.955975
82
h
null
ceph-main/src/librbd/PluginRegistry.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_PLUGIN_REGISTRY_H
#define CEPH_LIBRBD_PLUGIN_REGISTRY_H

#include "librbd/plugin/Types.h"
#include <memory>
#include <string>
#include <list>

struct Context;

namespace librbd {

struct ImageCtx;

namespace cache { class ImageWritebackInterface; }
namespace plugin { template <typename> struct Api; }

template <typename ImageCtxT>
class PluginRegistry {
public:
  PluginRegistry(ImageCtxT* image_ctx);
  ~PluginRegistry();

  void init(const std::string& plugins, Context* on_finish);

  void acquired_exclusive_lock(Context* on_finish);
  void prerelease_exclusive_lock(Context* on_finish);
  void discard(Context* on_finish);

private:
  ImageCtxT* m_image_ctx;
  std::unique_ptr<plugin::Api<ImageCtxT>> m_plugin_api;
  std::unique_ptr<cache::ImageWritebackInterface> m_image_writeback;

  std::string m_plugins;

  plugin::PluginHookPoints m_plugin_hook_points;
};

} // namespace librbd

extern template class librbd::PluginRegistry<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_PLUGIN_REGISTRY_H
1,120
20.557692
70
h
null
ceph-main/src/librbd/TaskFinisher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef LIBRBD_TASK_FINISHER_H
#define LIBRBD_TASK_FINISHER_H

#include "include/common_fwd.h"
#include "include/Context.h"
#include "common/ceph_context.h"
#include "common/Finisher.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include <map>
#include <utility>

namespace librbd {

struct TaskFinisherSingleton {
  ceph::mutex m_lock = ceph::make_mutex("librbd::TaskFinisher::m_lock");
  SafeTimer *m_safe_timer;
  Finisher *m_finisher;

  static TaskFinisherSingleton& get_singleton(CephContext* cct) {
    return cct->lookup_or_create_singleton_object<
      TaskFinisherSingleton>("librbd::TaskFinisherSingleton", false, cct);
  }

  explicit TaskFinisherSingleton(CephContext *cct) {
    m_safe_timer = new SafeTimer(cct, m_lock, false);
    m_safe_timer->init();
    m_finisher = new Finisher(cct, "librbd::TaskFinisher::m_finisher",
                              "taskfin_librbd");
    m_finisher->start();
  }
  virtual ~TaskFinisherSingleton() {
    {
      std::lock_guard l{m_lock};
      m_safe_timer->shutdown();
      delete m_safe_timer;
    }
    m_finisher->wait_for_empty();
    m_finisher->stop();
    delete m_finisher;
  }

  void queue(Context* ctx, int r) {
    m_finisher->queue(ctx, r);
  }
};

template <typename Task>
class TaskFinisher {
public:
  TaskFinisher(CephContext &cct) : m_cct(cct) {
    auto& singleton = TaskFinisherSingleton::get_singleton(&cct);
    m_lock = &singleton.m_lock;
    m_safe_timer = singleton.m_safe_timer;
    m_finisher = singleton.m_finisher;
  }

  bool cancel(const Task& task) {
    std::lock_guard l{*m_lock};
    typename TaskContexts::iterator it = m_task_contexts.find(task);
    if (it == m_task_contexts.end()) {
      return false;
    }
    it->second.first->complete(-ECANCELED);
    m_safe_timer->cancel_event(it->second.second);
    m_task_contexts.erase(it);
    return true;
  }

  void cancel_all() {
    std::lock_guard l{*m_lock};
    for (auto &[task, pair] : m_task_contexts) {
      pair.first->complete(-ECANCELED);
      m_safe_timer->cancel_event(pair.second);
    }
    m_task_contexts.clear();
  }

  bool add_event_after(const Task& task, double seconds, Context *ctx) {
    std::lock_guard l{*m_lock};
    if (m_task_contexts.count(task) != 0) {
      // task already scheduled on finisher or timer
      delete ctx;
      return false;
    }
    C_Task *timer_ctx = new C_Task(this, task);
    m_task_contexts[task] = std::make_pair(ctx, timer_ctx);

    m_safe_timer->add_event_after(seconds, timer_ctx);
    return true;
  }

  bool reschedule_event_after(const Task& task, double seconds) {
    std::lock_guard l{*m_lock};
    auto it = m_task_contexts.find(task);
    if (it == m_task_contexts.end()) {
      return false;
    }
    bool canceled = m_safe_timer->cancel_event(it->second.second);
    if (!canceled) {
      return false;
    }
    auto timer_ctx = new C_Task(this, task);
    it->second.second = timer_ctx;
    m_safe_timer->add_event_after(seconds, timer_ctx);
    return true;
  }

  void queue(Context *ctx, int r = 0) {
    m_finisher->queue(ctx, r);
  }

  bool queue(const Task& task, Context *ctx) {
    std::lock_guard l{*m_lock};
    typename TaskContexts::iterator it = m_task_contexts.find(task);
    if (it != m_task_contexts.end()) {
      if (it->second.second != NULL &&
          m_safe_timer->cancel_event(it->second.second)) {
        it->second.first->complete(-ECANCELED);
      } else {
        // task already scheduled on the finisher
        ctx->complete(-ECANCELED);
        return false;
      }
    }
    m_task_contexts[task] = std::make_pair(ctx, reinterpret_cast<Context *>(0));

    m_finisher->queue(new C_Task(this, task));
    return true;
  }

private:
  class C_Task : public Context {
  public:
    C_Task(TaskFinisher *task_finisher, const Task& task)
      : m_task_finisher(task_finisher), m_task(task)
    {
    }
  protected:
    void finish(int r) override {
      m_task_finisher->complete(m_task);
    }
  private:
    TaskFinisher *m_task_finisher;
    Task m_task;
  };

  CephContext &m_cct;

  ceph::mutex *m_lock;
  Finisher *m_finisher;
  SafeTimer *m_safe_timer;

  typedef std::map<Task, std::pair<Context *, Context *> > TaskContexts;
  TaskContexts m_task_contexts;

  void complete(const Task& task) {
    Context *ctx = NULL;
    {
      std::lock_guard l{*m_lock};
      typename TaskContexts::iterator it = m_task_contexts.find(task);
      if (it != m_task_contexts.end()) {
        ctx = it->second.first;
        m_task_contexts.erase(it);
      }
    }
    if (ctx != NULL) {
      ctx->complete(0);
    }
  }
};

} // namespace librbd

#endif // LIBRBD_TASK_FINISHER
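// --- Editorial usage sketch (not part of the upstream header): scheduling a
// delayed task keyed by an integer id and then canceling it; assumes a valid
// CephContext reference.
struct C_Fired : public Context {
  void finish(int r) override {
    // r == 0 when the timer fired, -ECANCELED when canceled first
  }
};

inline void task_finisher_example(CephContext& cct) {
  librbd::TaskFinisher<int> tasks(cct);
  tasks.add_event_after(1 /* task id */, 5.0 /* seconds */, new C_Fired());
  tasks.cancel(1);  // completes the pending context with -ECANCELED
}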
4,727
25.266667
89
h
null
ceph-main/src/librbd/TrashWatcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_TRASH_WATCHER_H
#define CEPH_LIBRBD_TRASH_WATCHER_H

#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/trash_watcher/Types.h"

namespace librbd {

namespace asio { struct ContextWQ; }
namespace watcher {
namespace util {
template <typename> struct HandlePayloadVisitor;
} // namespace util
} // namespace watcher

template <typename ImageCtxT = librbd::ImageCtx>
class TrashWatcher : public Watcher {
  friend struct watcher::util::HandlePayloadVisitor<TrashWatcher<ImageCtxT>>;

public:
  TrashWatcher(librados::IoCtx &io_ctx, asio::ContextWQ *work_queue);

  static void notify_image_added(librados::IoCtx &io_ctx,
                                 const std::string& image_id,
                                 const cls::rbd::TrashImageSpec& spec,
                                 Context *on_finish);
  static void notify_image_removed(librados::IoCtx &io_ctx,
                                   const std::string& image_id,
                                   Context *on_finish);

protected:
  virtual void handle_image_added(const std::string &image_id,
                                  const cls::rbd::TrashImageSpec& spec) = 0;

  virtual void handle_image_removed(const std::string &image_id) = 0;

private:
  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist &bl) override;

  bool handle_payload(const trash_watcher::ImageAddedPayload &payload,
                      Context *on_notify_ack);
  bool handle_payload(const trash_watcher::ImageRemovedPayload &payload,
                      Context *on_notify_ack);
  bool handle_payload(const trash_watcher::UnknownPayload &payload,
                      Context *on_notify_ack);
};

} // namespace librbd

extern template class librbd::TrashWatcher<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_TRASH_WATCHER_H
2,075
34.186441
77
h
null
ceph-main/src/librbd/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef LIBRBD_TYPES_H
#define LIBRBD_TYPES_H

#include "include/types.h"
#include "cls/rbd/cls_rbd_types.h"
#include "deep_copy/Types.h"
#include <map>
#include <memory>
#include <string>

namespace neorados { class IOContext; }

namespace librbd {

// Performance counters
enum {
  l_librbd_first = 26000,

  l_librbd_rd,               // read ops
  l_librbd_rd_bytes,         // bytes read
  l_librbd_rd_latency,       // average latency
  l_librbd_wr,
  l_librbd_wr_bytes,
  l_librbd_wr_latency,
  l_librbd_discard,
  l_librbd_discard_bytes,
  l_librbd_discard_latency,
  l_librbd_flush,
  l_librbd_flush_latency,
  l_librbd_ws,
  l_librbd_ws_bytes,
  l_librbd_ws_latency,
  l_librbd_cmp,
  l_librbd_cmp_bytes,
  l_librbd_cmp_latency,
  l_librbd_snap_create,
  l_librbd_snap_remove,
  l_librbd_snap_rollback,
  l_librbd_snap_rename,
  l_librbd_notify,
  l_librbd_resize,
  l_librbd_readahead,
  l_librbd_readahead_bytes,
  l_librbd_invalidate_cache,
  l_librbd_opened_time,
  l_librbd_lock_acquired_time,

  l_librbd_last,
};

typedef std::shared_ptr<neorados::IOContext> IOContext;

typedef std::map<uint64_t, uint64_t> SnapSeqs;

/// Full information about an image's parent.
struct ParentImageInfo {
  /// Identification of the parent.
  cls::rbd::ParentImageSpec spec;

  /** @brief Where the portion of data shared with the child image ends.
   * Since images can be resized multiple times, the portion of data shared
   * with the child image is not necessarily min(parent size, child size).
   * If the child image is first shrunk and then enlarged, the common portion
   * will be shorter. */
  uint64_t overlap = 0;
};

struct SnapInfo {
  std::string name;
  cls::rbd::SnapshotNamespace snap_namespace;
  uint64_t size;
  ParentImageInfo parent;
  uint8_t protection_status;
  uint64_t flags;
  utime_t timestamp;

  SnapInfo(std::string _name,
           const cls::rbd::SnapshotNamespace &_snap_namespace,
           uint64_t _size, const ParentImageInfo &_parent,
           uint8_t _protection_status, uint64_t _flags, utime_t _timestamp)
    : name(_name), snap_namespace(_snap_namespace), size(_size),
      parent(_parent), protection_status(_protection_status), flags(_flags),
      timestamp(_timestamp) {
  }
};

enum {
  OPEN_FLAG_SKIP_OPEN_PARENT = 1 << 0,
  OPEN_FLAG_OLD_FORMAT       = 1 << 1,
  OPEN_FLAG_IGNORE_MIGRATING = 1 << 2
};

enum ImageReadOnlyFlag {
  IMAGE_READ_ONLY_FLAG_USER        = 1 << 0,
  IMAGE_READ_ONLY_FLAG_NON_PRIMARY = 1 << 1,
};

enum SnapCreateFlag {
  SNAP_CREATE_FLAG_SKIP_OBJECT_MAP             = 1 << 0,
  SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE         = 1 << 1,
  SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR = 1 << 2,
};

struct MigrationInfo {
  int64_t pool_id = -1;
  std::string pool_namespace;
  std::string image_name;
  std::string image_id;
  std::string source_spec;
  deep_copy::SnapMap snap_map;
  uint64_t overlap = 0;
  bool flatten = false;

  MigrationInfo() {
  }
  MigrationInfo(int64_t pool_id, const std::string& pool_namespace,
                const std::string& image_name, const std::string& image_id,
                const std::string& source_spec,
                const deep_copy::SnapMap &snap_map, uint64_t overlap,
                bool flatten)
    : pool_id(pool_id), pool_namespace(pool_namespace),
      image_name(image_name), image_id(image_id), source_spec(source_spec),
      snap_map(snap_map), overlap(overlap), flatten(flatten) {
  }

  bool empty() const {
    return (pool_id == -1 && source_spec.empty());
  }
};

} // namespace librbd

#endif // LIBRBD_TYPES_H
3,665
24.636364
79
h
null
ceph-main/src/librbd/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_UTILS_H
#define CEPH_LIBRBD_UTILS_H

#include "include/rados/librados.hpp"
#include "include/rbd_types.h"
#include "include/ceph_assert.h"
#include "include/Context.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
#include "common/RefCountedObj.h"

#include <atomic>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>
#include <stdio.h>

namespace librbd {

class ImageCtx;

namespace util {
namespace detail {

template <typename T>
void rados_callback(rados_completion_t c, void *arg) {
  reinterpret_cast<T*>(arg)->complete(rados_aio_get_return_value(c));
}

template <typename T, void(T::*MF)(int)>
void rados_callback(rados_completion_t c, void *arg) {
  T *obj = reinterpret_cast<T*>(arg);
  int r = rados_aio_get_return_value(c);
  (obj->*MF)(r);
}

template <typename T, Context*(T::*MF)(int*), bool destroy>
void rados_state_callback(rados_completion_t c, void *arg) {
  T *obj = reinterpret_cast<T*>(arg);
  int r = rados_aio_get_return_value(c);
  Context *on_finish = (obj->*MF)(&r);
  if (on_finish != nullptr) {
    on_finish->complete(r);
    if (destroy) {
      delete obj;
    }
  }
}

template <typename T, void (T::*MF)(int)>
class C_CallbackAdapter : public Context {
  T *obj;
public:
  C_CallbackAdapter(T *obj) : obj(obj) {
  }

protected:
  void finish(int r) override {
    (obj->*MF)(r);
  }
};

template <typename T, void (T::*MF)(int)>
class C_RefCallbackAdapter : public Context {
  RefCountedPtr refptr;
  Context *on_finish;

public:
  C_RefCallbackAdapter(T *obj, RefCountedPtr refptr)
    : refptr(std::move(refptr)),
      on_finish(new C_CallbackAdapter<T, MF>(obj)) {
  }

protected:
  void finish(int r) override {
    on_finish->complete(r);
  }
};

template <typename T, Context*(T::*MF)(int*), bool destroy>
class C_StateCallbackAdapter : public Context {
  T *obj;
public:
  C_StateCallbackAdapter(T *obj) : obj(obj){
  }

protected:
  void complete(int r) override {
    Context *on_finish = (obj->*MF)(&r);
    if (on_finish != nullptr) {
      on_finish->complete(r);
      if (destroy) {
        delete obj;
      }
    }
    Context::complete(r);
  }
  void finish(int r) override {
  }
};

template <typename T, Context*(T::*MF)(int*)>
class C_RefStateCallbackAdapter : public Context {
  RefCountedPtr refptr;
  Context *on_finish;

public:
  C_RefStateCallbackAdapter(T *obj, RefCountedPtr refptr)
    : refptr(std::move(refptr)),
      on_finish(new C_StateCallbackAdapter<T, MF, true>(obj)) {
  }

protected:
  void finish(int r) override {
    on_finish->complete(r);
  }
};

template <typename WQ>
struct C_AsyncCallback : public Context {
  WQ *op_work_queue;
  Context *on_finish;

  C_AsyncCallback(WQ *op_work_queue, Context *on_finish)
    : op_work_queue(op_work_queue), on_finish(on_finish) {
  }
  ~C_AsyncCallback() override {
    delete on_finish;
  }
  void finish(int r) override {
    op_work_queue->queue(on_finish, r);
    on_finish = nullptr;
  }
};

} // namespace detail

std::string generate_image_id(librados::IoCtx &ioctx);

template <typename T>
inline std::string generate_image_id(librados::IoCtx &ioctx) {
  return generate_image_id(ioctx);
}

const std::string group_header_name(const std::string &group_id);
const std::string id_obj_name(const std::string &name);
const std::string header_name(const std::string &image_id);
const std::string old_header_name(const std::string &image_name);
std::string unique_lock_name(const std::string &name, void *address);

template <typename I>
std::string data_object_name(I* image_ctx, uint64_t object_no) {
  char buf[RBD_MAX_OBJ_NAME_SIZE];
  size_t length = snprintf(buf, RBD_MAX_OBJ_NAME_SIZE,
                           image_ctx->format_string, object_no);
  ceph_assert(length < RBD_MAX_OBJ_NAME_SIZE);

  std::string oid;
  oid.reserve(RBD_MAX_OBJ_NAME_SIZE);
  oid.append(buf, length);
  return oid;
}

librados::AioCompletion *create_rados_callback(Context *on_finish);

template <typename T>
librados::AioCompletion *create_rados_callback(T *obj) {
  return librados::Rados::aio_create_completion(
    obj, &detail::rados_callback<T>);
}

template <typename T, void(T::*MF)(int)>
librados::AioCompletion *create_rados_callback(T *obj) {
  return librados::Rados::aio_create_completion(
    obj, &detail::rados_callback<T, MF>);
}

template <typename T, Context*(T::*MF)(int*), bool destroy=true>
librados::AioCompletion *create_rados_callback(T *obj) {
  return librados::Rados::aio_create_completion(
    obj, &detail::rados_state_callback<T, MF, destroy>);
}

template <typename T, void(T::*MF)(int) = &T::complete>
Context *create_context_callback(T *obj) {
  return new detail::C_CallbackAdapter<T, MF>(obj);
}

template <typename T, Context*(T::*MF)(int*), bool destroy=true>
Context *create_context_callback(T *obj) {
  return new detail::C_StateCallbackAdapter<T, MF, destroy>(obj);
}

//for reference counting objects
template <typename T, void(T::*MF)(int) = &T::complete>
Context *create_context_callback(T *obj, RefCountedPtr refptr) {
  return new detail::C_RefCallbackAdapter<T, MF>(obj, refptr);
}

template <typename T, Context*(T::*MF)(int*)>
Context *create_context_callback(T *obj, RefCountedPtr refptr) {
  return new detail::C_RefStateCallbackAdapter<T, MF>(obj, refptr);
}

//for objects that don't inherit from RefCountedObj, to handle unit tests
template <typename T, void(T::*MF)(int) = &T::complete, typename R>
typename std::enable_if<not std::is_base_of<RefCountedPtr, R>::value,
                        Context*>::type
create_context_callback(T *obj, R *refptr) {
  return new detail::C_CallbackAdapter<T, MF>(obj);
}

template <typename T, Context*(T::*MF)(int*), typename R, bool destroy=true>
typename std::enable_if<not std::is_base_of<RefCountedPtr, R>::value,
                        Context*>::type
create_context_callback(T *obj, R *refptr) {
  return new detail::C_StateCallbackAdapter<T, MF, destroy>(obj);
}

template <typename I>
Context *create_async_context_callback(I &image_ctx, Context *on_finish) {
  // use async callback to acquire a clean lock context
  return new detail::C_AsyncCallback<
    typename std::decay<decltype(*image_ctx.op_work_queue)>::type>(
      image_ctx.op_work_queue, on_finish);
}

template <typename WQ>
Context *create_async_context_callback(WQ *work_queue, Context *on_finish) {
  // use async callback to acquire a clean lock context
  return new detail::C_AsyncCallback<WQ>(work_queue, on_finish);
}

// TODO: temporary until AioCompletion supports templated ImageCtx
inline ImageCtx *get_image_ctx(ImageCtx *image_ctx) {
  return image_ctx;
}

uint64_t get_rbd_default_features(CephContext* cct);

bool calc_sparse_extent(const bufferptr &bp,
                        size_t sparse_size,
                        uint64_t length,
                        size_t *write_offset,
                        size_t *write_length,
                        size_t *offset);

template <typename I>
inline ZTracer::Trace create_trace(const I &image_ctx, const char *trace_name,
                                   const ZTracer::Trace &parent_trace) {
  if (parent_trace.valid()) {
    return ZTracer::Trace(trace_name, &image_ctx.trace_endpoint, &parent_trace);
  }
  return ZTracer::Trace();
}

bool is_metadata_config_override(const std::string& metadata_key,
                                 std::string* config_key);

int create_ioctx(librados::IoCtx& src_io_ctx, const std::string& pool_desc,
                 int64_t pool_id,
                 const std::optional<std::string>& pool_namespace,
                 librados::IoCtx* dst_io_ctx);

int snap_create_flags_api_to_internal(CephContext *cct, uint32_t api_flags,
                                      uint64_t *internal_flags);

uint32_t get_default_snap_create_flags(ImageCtx *ictx);

SnapContext get_snap_context(
    const std::optional<
      std::pair<std::uint64_t,
                std::vector<std::uint64_t>>>& write_snap_context);

uint64_t reserve_async_request_id();

bool is_config_key_uri(const std::string& uri);
int get_config_key(librados::Rados& rados, const std::string& uri,
                   std::string* value);

} // namespace util
} // namespace librbd

#endif // CEPH_LIBRBD_UTILS_H
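// --- Editorial usage sketch (not part of the upstream header): adapting a
// member function to both Context and rados completions; `Foo` is a
// hypothetical type introduced only for illustration.
struct Foo {
  void handle_done(int r) {
    // invoked with the operation's return value
  }
};

inline void utils_callback_example(Foo* foo, librados::IoCtx& io_ctx,
                                   const std::string& oid) {
  Context* ctx =
    librbd::util::create_context_callback<Foo, &Foo::handle_done>(foo);
  ctx->complete(0);  // fires Foo::handle_done(0) and deletes the adapter

  librados::AioCompletion* comp =
    librbd::util::create_rados_callback<Foo, &Foo::handle_done>(foo);
  io_ctx.aio_stat(oid, comp, nullptr, nullptr);
  comp->release();
}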
8,251
27.752613
85
h
null
ceph-main/src/librbd/WatchNotifyTypes.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef LIBRBD_WATCH_NOTIFY_TYPES_H
#define LIBRBD_WATCH_NOTIFY_TYPES_H

#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "librbd/watcher/Types.h"
#include <iosfwd>
#include <list>
#include <memory>
#include <string>
#include <boost/variant.hpp>

namespace ceph {
class Formatter;
}

namespace librbd {
namespace watch_notify {

using librbd::watcher::ClientId;

WRITE_CLASS_ENCODER(ClientId);

struct AsyncRequestId {
  ClientId client_id;
  uint64_t request_id;

  AsyncRequestId() : request_id() {}
  AsyncRequestId(const ClientId &client_id_, uint64_t request_id_)
    : client_id(client_id_), request_id(request_id_) {}

  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;

  inline bool operator<(const AsyncRequestId &rhs) const {
    if (client_id != rhs.client_id) {
      return client_id < rhs.client_id;
    } else {
      return request_id < rhs.request_id;
    }
  }
  inline bool operator!=(const AsyncRequestId &rhs) const {
    return (client_id != rhs.client_id || request_id != rhs.request_id);
  }
  inline operator bool() const {
    return (*this != AsyncRequestId());
  }
};

enum NotifyOp {
  NOTIFY_OP_ACQUIRED_LOCK      = 0,
  NOTIFY_OP_RELEASED_LOCK      = 1,
  NOTIFY_OP_REQUEST_LOCK       = 2,
  NOTIFY_OP_HEADER_UPDATE      = 3,
  NOTIFY_OP_ASYNC_PROGRESS     = 4,
  NOTIFY_OP_ASYNC_COMPLETE     = 5,
  NOTIFY_OP_FLATTEN            = 6,
  NOTIFY_OP_RESIZE             = 7,
  NOTIFY_OP_SNAP_CREATE        = 8,
  NOTIFY_OP_SNAP_REMOVE        = 9,
  NOTIFY_OP_REBUILD_OBJECT_MAP = 10,
  NOTIFY_OP_SNAP_RENAME        = 11,
  NOTIFY_OP_SNAP_PROTECT       = 12,
  NOTIFY_OP_SNAP_UNPROTECT     = 13,
  NOTIFY_OP_RENAME             = 14,
  NOTIFY_OP_UPDATE_FEATURES    = 15,
  NOTIFY_OP_MIGRATE            = 16,
  NOTIFY_OP_SPARSIFY           = 17,
  NOTIFY_OP_QUIESCE            = 18,
  NOTIFY_OP_UNQUIESCE          = 19,
  NOTIFY_OP_METADATA_UPDATE    = 20,
};

struct Payload {
  virtual ~Payload() {}

  virtual NotifyOp get_notify_op() const = 0;
  virtual bool check_for_refresh() const = 0;

  virtual void encode(bufferlist &bl) const = 0;
  virtual void decode(__u8 version, bufferlist::const_iterator &iter) = 0;
  virtual void dump(Formatter *f) const = 0;
};

struct AcquiredLockPayload : public Payload {
  ClientId client_id;

  AcquiredLockPayload() {}
  AcquiredLockPayload(const ClientId &client_id) : client_id(client_id) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_ACQUIRED_LOCK;
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct ReleasedLockPayload : public Payload {
  ClientId client_id;

  ReleasedLockPayload() {}
  ReleasedLockPayload(const ClientId &client_id) : client_id(client_id) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_RELEASED_LOCK;
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct RequestLockPayload : public Payload {
  ClientId client_id;
  bool force = false;

  RequestLockPayload() {}
  RequestLockPayload(const ClientId &client_id, bool force)
    : client_id(client_id), force(force) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_REQUEST_LOCK;
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct HeaderUpdatePayload : public Payload {
  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_HEADER_UPDATE;
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct AsyncRequestPayloadBase : public Payload {
public:
  AsyncRequestId async_request_id;

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;

protected:
  AsyncRequestPayloadBase() {}
  AsyncRequestPayloadBase(const AsyncRequestId &id) : async_request_id(id) {}
};

struct AsyncProgressPayload : public AsyncRequestPayloadBase {
  uint64_t offset = 0;
  uint64_t total = 0;

  AsyncProgressPayload() {}
  AsyncProgressPayload(const AsyncRequestId &id, uint64_t offset,
                       uint64_t total)
    : AsyncRequestPayloadBase(id), offset(offset), total(total) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_ASYNC_PROGRESS;
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct AsyncCompletePayload : public AsyncRequestPayloadBase {
  int result = 0;

  AsyncCompletePayload() {}
  AsyncCompletePayload(const AsyncRequestId &id, int r)
    : AsyncRequestPayloadBase(id), result(r) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_ASYNC_COMPLETE;
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct FlattenPayload : public AsyncRequestPayloadBase {
  FlattenPayload() {}
  FlattenPayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_FLATTEN;
  }
  bool check_for_refresh() const override {
    return true;
  }
};

struct ResizePayload : public AsyncRequestPayloadBase {
  uint64_t size = 0;
  bool allow_shrink = true;

  ResizePayload() {}
  ResizePayload(const AsyncRequestId &id, uint64_t size, bool allow_shrink)
    : AsyncRequestPayloadBase(id), size(size), allow_shrink(allow_shrink) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_RESIZE;
  }
  bool check_for_refresh() const override {
    return true;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct SnapPayloadBase : public AsyncRequestPayloadBase {
public:
  cls::rbd::SnapshotNamespace snap_namespace;
  std::string snap_name;

  bool check_for_refresh() const override {
    return true;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;

protected:
  SnapPayloadBase() {}
  SnapPayloadBase(const AsyncRequestId &id,
                  const cls::rbd::SnapshotNamespace& snap_namespace,
                  const std::string &name)
    : AsyncRequestPayloadBase(id), snap_namespace(snap_namespace),
      snap_name(name) {
  }
};

struct SnapCreatePayload : public SnapPayloadBase {
  uint64_t flags = 0;

  SnapCreatePayload() {}
  SnapCreatePayload(const AsyncRequestId &id,
                    const cls::rbd::SnapshotNamespace &snap_namespace,
                    const std::string &name, uint64_t flags)
    : SnapPayloadBase(id, snap_namespace, name), flags(flags) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_SNAP_CREATE;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct SnapRenamePayload : public SnapPayloadBase {
  uint64_t snap_id = 0;

  SnapRenamePayload() {}
  SnapRenamePayload(const AsyncRequestId &id, const uint64_t &src_snap_id,
                    const std::string &dst_name)
    : SnapPayloadBase(id, cls::rbd::UserSnapshotNamespace(), dst_name),
      snap_id(src_snap_id) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_SNAP_RENAME;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct SnapRemovePayload : public SnapPayloadBase {
  SnapRemovePayload() {}
  SnapRemovePayload(const AsyncRequestId &id,
                    const cls::rbd::SnapshotNamespace& snap_namespace,
                    const std::string &name)
    : SnapPayloadBase(id, snap_namespace, name) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_SNAP_REMOVE;
  }
};

struct SnapProtectPayload : public SnapPayloadBase {
  SnapProtectPayload() {}
  SnapProtectPayload(const AsyncRequestId &id,
                     const cls::rbd::SnapshotNamespace& snap_namespace,
                     const std::string &name)
    : SnapPayloadBase(id, snap_namespace, name) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_SNAP_PROTECT;
  }
};

struct SnapUnprotectPayload : public SnapPayloadBase {
  SnapUnprotectPayload() {}
  SnapUnprotectPayload(const AsyncRequestId &id,
                       const cls::rbd::SnapshotNamespace& snap_namespace,
                       const std::string &name)
    : SnapPayloadBase(id, snap_namespace, name) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_SNAP_UNPROTECT;
  }
};

struct RebuildObjectMapPayload : public AsyncRequestPayloadBase {
  RebuildObjectMapPayload() {}
  RebuildObjectMapPayload(const AsyncRequestId &id)
    : AsyncRequestPayloadBase(id) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_REBUILD_OBJECT_MAP;
  }
  bool check_for_refresh() const override {
    return true;
  }
};

struct RenamePayload : public AsyncRequestPayloadBase {
  std::string image_name;

  RenamePayload() {}
  RenamePayload(const AsyncRequestId &id, const std::string _image_name)
    : AsyncRequestPayloadBase(id), image_name(_image_name) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_RENAME;
  }
  bool check_for_refresh() const override {
    return true;
  }

  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

struct UpdateFeaturesPayload : public AsyncRequestPayloadBase {
  uint64_t features = 0;
  bool enabled = false;

  UpdateFeaturesPayload() {}
  UpdateFeaturesPayload(const AsyncRequestId &id, uint64_t features,
                        bool enabled)
    : AsyncRequestPayloadBase(id), features(features), enabled(enabled) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_UPDATE_FEATURES;
  }
  bool check_for_refresh() const override {
    return true;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct MigratePayload : public AsyncRequestPayloadBase {
  MigratePayload() {}
  MigratePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_MIGRATE;
  }
  bool check_for_refresh() const override {
    return true;
  }
};

struct SparsifyPayload : public AsyncRequestPayloadBase {
  uint64_t sparse_size = 0;

  SparsifyPayload() {}
  SparsifyPayload(const AsyncRequestId &id, uint64_t sparse_size)
    : AsyncRequestPayloadBase(id), sparse_size(sparse_size) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_SPARSIFY;
  }
  bool check_for_refresh() const override {
    return true;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct QuiescePayload : public AsyncRequestPayloadBase {
  QuiescePayload() {}
  QuiescePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_QUIESCE;
  }
  bool check_for_refresh() const override {
    return false;
  }
};

struct UnquiescePayload : public AsyncRequestPayloadBase {
  UnquiescePayload() {}
  UnquiescePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_UNQUIESCE;
  }
  bool check_for_refresh() const override {
    return false;
  }
};

struct MetadataUpdatePayload : public AsyncRequestPayloadBase {
  std::string key;
  std::optional<std::string> value;

  MetadataUpdatePayload() {}
  MetadataUpdatePayload(const AsyncRequestId &id, std::string key,
                        std::optional<std::string> value)
    : AsyncRequestPayloadBase(id), key(key), value(value) {
  }

  NotifyOp get_notify_op() const override {
    return NOTIFY_OP_METADATA_UPDATE;
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};

struct UnknownPayload : public Payload {
  NotifyOp get_notify_op() const override {
    return static_cast<NotifyOp>(-1);
  }
  bool check_for_refresh() const override {
    return false;
  }

  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};

struct NotifyMessage {
  NotifyMessage() : payload(new UnknownPayload()) {}
  NotifyMessage(Payload *payload) : payload(payload) {}

  std::unique_ptr<Payload> payload;

  bool check_for_refresh() const;

  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;

  NotifyOp get_notify_op() const;

  static void generate_test_instances(std::list<NotifyMessage *> &o);
};

struct ResponseMessage {
  ResponseMessage() : result(0) {}
  ResponseMessage(int result_) : result(result_) {}

  int result;

  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;

  static void generate_test_instances(std::list<ResponseMessage *> &o);
};

std::ostream &operator<<(std::ostream &out, const NotifyOp &op);
std::ostream &operator<<(std::ostream &out, const AsyncRequestId &request);

WRITE_CLASS_ENCODER(AsyncRequestId);
WRITE_CLASS_ENCODER(NotifyMessage);
WRITE_CLASS_ENCODER(ResponseMessage);

} // namespace watch_notify
} // namespace librbd

#endif // LIBRBD_WATCH_NOTIFY_TYPES_H
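// --- Editorial usage sketch (not part of the upstream header): encoding and
// decoding a notification message; the encode/decode bodies live in the
// corresponding .cc file, and the free encode()/decode() functions come from
// the WRITE_CLASS_ENCODER expansions above.
inline void watch_notify_roundtrip_example() {
  using namespace librbd::watch_notify;

  NotifyMessage out_msg(new HeaderUpdatePayload());
  bufferlist bl;
  encode(out_msg, bl);

  NotifyMessage in_msg;
  auto it = bl.cbegin();
  decode(in_msg, it);
  // in_msg.get_notify_op() now reports NOTIFY_OP_HEADER_UPDATE
}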
14,766
26.705441
81
h
null
ceph-main/src/librbd/Watcher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_WATCHER_H
#define CEPH_LIBRBD_WATCHER_H

#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "common/RWLock.h"
#include "include/rados/librados.hpp"
#include "librbd/watcher/Notifier.h"
#include "librbd/watcher/Types.h"
#include <string>
#include <utility>

namespace librbd {

namespace asio { struct ContextWQ; }
namespace watcher { struct NotifyResponse; }

class Watcher {
public:
  struct C_NotifyAck : public Context {
    Watcher *watcher;
    CephContext *cct;
    uint64_t notify_id;
    uint64_t handle;
    bufferlist out;

    C_NotifyAck(Watcher *watcher, uint64_t notify_id, uint64_t handle);
    void finish(int r) override;
  };

  Watcher(librados::IoCtx& ioctx, asio::ContextWQ *work_queue,
          const std::string& oid);
  virtual ~Watcher();

  void register_watch(Context *on_finish);
  virtual void unregister_watch(Context *on_finish);
  void flush(Context *on_finish);

  bool notifications_blocked() const;
  virtual void block_notifies(Context *on_finish);
  void unblock_notifies();

  std::string get_oid() const;
  void set_oid(const std::string& oid);

  uint64_t get_watch_handle() const {
    std::shared_lock watch_locker{m_watch_lock};
    return m_watch_handle;
  }

  bool is_registered() const {
    std::shared_lock locker{m_watch_lock};
    return is_registered(m_watch_lock);
  }
  bool is_unregistered() const {
    std::shared_lock locker{m_watch_lock};
    return is_unregistered(m_watch_lock);
  }
  bool is_blocklisted() const {
    std::shared_lock locker{m_watch_lock};
    return m_watch_blocklisted;
  }

protected:
  enum WatchState {
    WATCH_STATE_IDLE,
    WATCH_STATE_REGISTERING,
    WATCH_STATE_REWATCHING
  };

  librados::IoCtx& m_ioctx;
  asio::ContextWQ *m_work_queue;
  std::string m_oid;
  CephContext *m_cct;
  mutable ceph::shared_mutex m_watch_lock;
  uint64_t m_watch_handle;
  watcher::Notifier m_notifier;

  WatchState m_watch_state;
  bool m_watch_blocklisted = false;

  AsyncOpTracker m_async_op_tracker;

  bool is_registered(const ceph::shared_mutex&) const {
    return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle != 0);
  }
  bool is_unregistered(const ceph::shared_mutex&) const {
    return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle == 0);
  }

  void send_notify(bufferlist &payload,
                   watcher::NotifyResponse *response = nullptr,
                   Context *on_finish = nullptr);

  virtual void handle_notify(uint64_t notify_id, uint64_t handle,
                             uint64_t notifier_id, bufferlist &bl) = 0;

  virtual void handle_error(uint64_t cookie, int err);

  void acknowledge_notify(uint64_t notify_id, uint64_t handle,
                          bufferlist &out);

  virtual void handle_rewatch_complete(int r) { }

private:
  /**
   * @verbatim
   *
   *       <start>
   *          |
   *          v
   *    UNREGISTERED
   *          |
   *          | (register_watch)
   *          |
   *    REGISTERING
   *          |
   *          v      (watch error)
   *    REGISTERED * * * * * * * > ERROR
   *          |   ^                  |
   *          |   |                  | (rewatch)
   *          |   |                  v
   *          |   |             REWATCHING
   *          |   |                  |
   *          |   |                  |
   *          |   \------------------/
   *          |
   *          | (unregister_watch)
   *          |
   *          v
   *    UNREGISTERED
   *          |
   *          v
   *       <finish>
   *
   * @endverbatim
   */

  struct WatchCtx : public librados::WatchCtx2 {
    Watcher &watcher;

    WatchCtx(Watcher &parent) : watcher(parent) {}

    void handle_notify(uint64_t notify_id,
                       uint64_t handle,
                       uint64_t notifier_id,
                       bufferlist& bl) override;
    void handle_error(uint64_t handle, int err) override;
  };

  struct C_RegisterWatch : public Context {
    Watcher *watcher;
    Context *on_finish;

    C_RegisterWatch(Watcher *watcher, Context *on_finish)
      : watcher(watcher), on_finish(on_finish) {
    }
    void finish(int r) override {
      watcher->handle_register_watch(r, on_finish);
    }
  };

  WatchCtx m_watch_ctx;
  Context *m_unregister_watch_ctx = nullptr;

  bool m_watch_error = false;

  uint32_t m_blocked_count = 0;

  void handle_register_watch(int r, Context *on_finish);

  void rewatch();
  void handle_rewatch(int r);
  void handle_rewatch_callback(int r);
};

} // namespace librbd

#endif // CEPH_LIBRBD_WATCHER_H
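// --- Editorial usage sketch (not part of the upstream header): the smallest
// concrete watcher, acking every notification with an empty payload; assumes
// io_ctx/work_queue outlive the watcher and that `oid` names an existing
// object. Registration is asynchronous via register_watch(on_finish).
struct EchoWatcher : public librbd::Watcher {
  EchoWatcher(librados::IoCtx& io_ctx, librbd::asio::ContextWQ* work_queue,
              const std::string& oid)
    : Watcher(io_ctx, work_queue, oid) {
  }

  void handle_notify(uint64_t notify_id, uint64_t handle,
                     uint64_t notifier_id, bufferlist& bl) override {
    bufferlist out;  // empty reply payload
    acknowledge_notify(notify_id, handle, out);
  }
};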
4,543
23.695652
71
h
null
ceph-main/src/librbd/internal.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_INTERNAL_H
#define CEPH_LIBRBD_INTERNAL_H

#include "include/int_types.h"

#include <map>
#include <set>
#include <string>
#include <vector>

#include "include/buffer_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rbd_types.h"
#include "cls/rbd/cls_rbd_types.h"
#include "common/ceph_time.h"
#include "librbd/Types.h"

namespace librbd {

  struct ImageCtx;
  namespace io { struct AioCompletion; enum class ImageArea; }

  class NoOpProgressContext : public ProgressContext {
  public:
    NoOpProgressContext() {
    }
    int update_progress(uint64_t offset, uint64_t src_size) override {
      return 0;
    }
  };

  int detect_format(librados::IoCtx &io_ctx, const std::string &name,
                    bool *old_format, uint64_t *size);

  bool has_parent(int64_t parent_pool_id, uint64_t off, uint64_t overlap);

  std::string image_option_name(int optname);
  void image_options_create(rbd_image_options_t* opts);
  void image_options_create_ref(rbd_image_options_t* opts,
                                rbd_image_options_t orig);
  void image_options_copy(rbd_image_options_t *opts,
                          const ImageOptions &orig);
  void image_options_destroy(rbd_image_options_t opts);
  int image_options_set(rbd_image_options_t opts, int optname,
                        const std::string& optval);
  int image_options_set(rbd_image_options_t opts, int optname,
                        uint64_t optval);
  int image_options_get(rbd_image_options_t opts, int optname,
                        std::string* optval);
  int image_options_get(rbd_image_options_t opts, int optname,
                        uint64_t* optval);
  int image_options_is_set(rbd_image_options_t opts, int optname,
                           bool* is_set);
  int image_options_unset(rbd_image_options_t opts, int optname);
  void image_options_clear(rbd_image_options_t opts);
  bool image_options_is_empty(rbd_image_options_t opts);

  int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
             int *order);
  int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
             bool old_format, uint64_t features, int *order,
             uint64_t stripe_unit, uint64_t stripe_count);
  int create(IoCtx& io_ctx, const std::string &image_name,
             const std::string &image_id, uint64_t size, ImageOptions& opts,
             const std::string &non_primary_global_image_id,
             const std::string &primary_mirror_uuid,
             bool skip_mirror_enable);
  int clone(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
            IoCtx& c_ioctx, const char *c_name, uint64_t features,
            int *c_order, uint64_t stripe_unit, int stripe_count);
  int clone(IoCtx& p_ioctx, const char *p_id, const char *p_name,
            const char *p_snap_name, IoCtx& c_ioctx, const char *c_id,
            const char *c_name, ImageOptions& c_opts,
            const std::string &non_primary_global_image_id,
            const std::string &primary_mirror_uuid);
  int rename(librados::IoCtx& io_ctx, const char *srcname,
             const char *dstname);
  int info(ImageCtx *ictx, image_info_t& info, size_t image_size);
  int get_old_format(ImageCtx *ictx, uint8_t *old);
  int get_size(ImageCtx *ictx, uint64_t *size);
  int get_features(ImageCtx *ictx, uint64_t *features);
  int get_overlap(ImageCtx *ictx, uint64_t *overlap);
  int get_flags(ImageCtx *ictx, uint64_t *flags);
  int set_image_notification(ImageCtx *ictx, int fd, int type);
  int is_exclusive_lock_owner(ImageCtx *ictx, bool *is_owner);
  int lock_acquire(ImageCtx *ictx, rbd_lock_mode_t lock_mode);
  int lock_release(ImageCtx *ictx);
  int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
                      std::list<std::string> *lock_owners);
  int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
                 const std::string &lock_owner);

  int copy(ImageCtx *ictx, IoCtx& dest_md_ctx, const char *destname,
           ImageOptions& opts, ProgressContext &prog_ctx, size_t sparse_size);
  int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx,
           size_t sparse_size);

  /* cooperative locking */
  int list_lockers(ImageCtx *ictx,
                   std::list<locker_t> *locks,
                   bool *exclusive,
                   std::string *tag);
  int lock(ImageCtx *ictx, bool exclusive, const std::string& cookie,
           const std::string& tag);
  int lock_shared(ImageCtx *ictx, const std::string& cookie,
                  const std::string& tag);
  int unlock(ImageCtx *ictx, const std::string& cookie);
  int break_lock(ImageCtx *ictx, const std::string& client,
                 const std::string& cookie);

  int read_header_bl(librados::IoCtx& io_ctx, const std::string& md_oid,
                     ceph::bufferlist& header, uint64_t *ver);
  int read_header(librados::IoCtx& io_ctx, const std::string& md_oid,
                  struct rbd_obj_header_ondisk *header, uint64_t *ver);
  int tmap_set(librados::IoCtx& io_ctx, const std::string& imgname);
  int tmap_rm(librados::IoCtx& io_ctx, const std::string& imgname);
  void image_info(const ImageCtx *ictx, image_info_t& info, size_t info_size);
  uint64_t oid_to_object_no(const std::string& oid,
                            const std::string& object_prefix);
  int clip_io(ImageCtx* ictx, uint64_t off, uint64_t* len,
              io::ImageArea area);
  void init_rbd_header(struct rbd_obj_header_ondisk& ondisk,
                       uint64_t size, int order, uint64_t bid);

  int64_t read_iterate(ImageCtx *ictx, uint64_t off, uint64_t len,
                       int (*cb)(uint64_t, size_t, const char *, void *),
                       void *arg);

  int invalidate_cache(ImageCtx *ictx);
  int poll_io_events(ImageCtx *ictx, io::AioCompletion **comps, int numcomp);
  int metadata_list(ImageCtx *ictx, const std::string &last, uint64_t max,
                    std::map<std::string, bufferlist> *pairs);
  int metadata_get(ImageCtx *ictx, const std::string &key,
                   std::string *value);

  int list_watchers(ImageCtx *ictx,
                    std::list<librbd::image_watcher_t> &watchers);
}

std::ostream &operator<<(std::ostream &os, const librbd::ImageOptions &opts);

#endif
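// --- Editorial usage sketch (not part of the upstream header): creating a
// 1 GiB image with the default object order through the first create()
// overload above; the image name is illustrative only.
inline int internal_create_example(librados::IoCtx& io_ctx) {
  int order = 0;  // 0 selects the default object size
  return librbd::create(io_ctx, "image1", 1ULL << 30, &order);
}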
5,956
39.80137
89
h
null
ceph-main/src/librbd/api/Config.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_CONFIG_H
#define CEPH_LIBRBD_API_CONFIG_H

#include "common/config_fwd.h"
#include "include/common_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"

namespace librbd {

class ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
class Config {
public:
  static bool is_option_name(librados::IoCtx& io_ctx, const std::string &name);
  static int list(librados::IoCtx& io_ctx,
                  std::vector<config_option_t> *options);

  static bool is_option_name(ImageCtxT *image_ctx, const std::string &name);
  static int list(ImageCtxT *image_ctx, std::vector<config_option_t> *options);

  static void apply_pool_overrides(librados::IoCtx& io_ctx,
                                   ConfigProxy* config);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Config<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_CONFIG_H
1,032
26.184211
79
h
null
ceph-main/src/librbd/api/DiffIterate.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_DIFF_ITERATE_H
#define CEPH_LIBRBD_API_DIFF_ITERATE_H

#include "include/int_types.h"
#include "common/bit_vector.hpp"
#include "cls/rbd/cls_rbd_types.h"

namespace librbd {

class ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
class DiffIterate {
public:
  typedef int (*Callback)(uint64_t, size_t, int, void *);

  static int diff_iterate(ImageCtxT *ictx,
                          const cls::rbd::SnapshotNamespace& from_snap_namespace,
                          const char *fromsnapname,
                          uint64_t off, uint64_t len, bool include_parent,
                          bool whole_object,
                          int (*cb)(uint64_t, size_t, int, void *),
                          void *arg);

private:
  ImageCtxT &m_image_ctx;
  cls::rbd::SnapshotNamespace m_from_snap_namespace;
  const char* m_from_snap_name;
  uint64_t m_offset;
  uint64_t m_length;
  bool m_include_parent;
  bool m_whole_object;
  Callback m_callback;
  void *m_callback_arg;

  DiffIterate(ImageCtxT &image_ctx,
              const cls::rbd::SnapshotNamespace& from_snap_namespace,
              const char *from_snap_name, uint64_t off, uint64_t len,
              bool include_parent, bool whole_object, Callback callback,
              void *callback_arg)
    : m_image_ctx(image_ctx), m_from_snap_namespace(from_snap_namespace),
      m_from_snap_name(from_snap_name), m_offset(off),
      m_length(len), m_include_parent(include_parent),
      m_whole_object(whole_object), m_callback(callback),
      m_callback_arg(callback_arg)
  {
  }

  int execute();

  int diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id,
                      BitVector<2>* object_diff_state);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::DiffIterate<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_DIFF_ITERATE_H
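// --- Editorial usage sketch (not part of the upstream header): walking all
// allocated extents of an open image; passing a null from-snapshot name
// diffs against an empty image, and `image_size` is assumed to come from a
// prior size query.
inline int diff_print_cb(uint64_t offset, size_t length, int exists,
                         void* arg) {
  // exists != 0 -> extent holds data; 0 -> hole/discarded
  return 0;  // a negative return aborts the iteration
}

inline int diff_iterate_example(librbd::ImageCtx* ictx, uint64_t image_size) {
  return librbd::api::DiffIterate<>::diff_iterate(
    ictx, cls::rbd::UserSnapshotNamespace{}, nullptr /* from start */,
    0 /* off */, image_size, true /* include_parent */,
    false /* whole_object */, &diff_print_cb, nullptr);
}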
1,897
27.328358
74
h
null
ceph-main/src/librbd/api/Group.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_GROUP_H
#define CEPH_LIBRBD_API_GROUP_H

#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
#include <string>
#include <vector>

namespace librbd {

struct ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
struct Group {
  static int create(librados::IoCtx& io_ctx, const char *group_name);
  static int remove(librados::IoCtx& io_ctx, const char *group_name);
  static int list(librados::IoCtx& io_ctx, std::vector<std::string> *names);
  static int rename(librados::IoCtx& io_ctx, const char *src_group_name,
                    const char *dest_group_name);

  static int image_add(librados::IoCtx& group_ioctx, const char *group_name,
                       librados::IoCtx& image_ioctx, const char *image_name);
  static int image_remove(librados::IoCtx& group_ioctx, const char *group_name,
                          librados::IoCtx& image_ioctx,
                          const char *image_name);
  static int image_remove_by_id(librados::IoCtx& group_ioctx,
                                const char *group_name,
                                librados::IoCtx& image_ioctx,
                                const char *image_id);
  static int image_list(librados::IoCtx& group_ioctx, const char *group_name,
                        std::vector<group_image_info_t> *images);

  static int image_get_group(ImageCtxT *ictx, group_info_t *group_info);

  static int snap_create(librados::IoCtx& group_ioctx,
                         const char *group_name, const char *snap_name,
                         uint32_t flags);
  static int snap_remove(librados::IoCtx& group_ioctx,
                         const char *group_name, const char *snap_name);
  static int snap_rename(librados::IoCtx& group_ioctx, const char *group_name,
                         const char *old_snap_name,
                         const char *new_snap_name);
  static int snap_list(librados::IoCtx& group_ioctx, const char *group_name,
                       std::vector<group_snap_info_t> *snaps);
  static int snap_rollback(librados::IoCtx& group_ioctx,
                           const char *group_name, const char *snap_name,
                           ProgressContext& pctx);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Group<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_GROUP_H
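// --- Editorial usage sketch (not part of the upstream header): creating a
// group and attaching an image that lives in the same pool; names are
// illustrative only.
inline int group_example(librados::IoCtx& io_ctx) {
  int r = librbd::api::Group<>::create(io_ctx, "backup_group");
  if (r < 0) {
    return r;
  }
  return librbd::api::Group<>::image_add(io_ctx, "backup_group",
                                         io_ctx, "image1");
}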
2,386
38.131148
79
h
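A hedged sketch of the public group API that fronts the Group template above
(assuming an open librados::IoCtx; group and image names are hypothetical):

#include <rbd/librbd.hpp>

// Creates a group and adds an existing image to it. The group and the
// image may live in different pools, hence the two IoCtx parameters.
int make_group_with_image(librados::IoCtx &io_ctx) {
  librbd::RBD rbd;
  int r = rbd.group_create(io_ctx, "backup_group");
  if (r < 0) {
    return r;
  }
  return rbd.group_image_add(io_ctx, "backup_group", io_ctx, "image1");
}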
null
ceph-main/src/librbd/api/Image.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef LIBRBD_API_IMAGE_H
#define LIBRBD_API_IMAGE_H

#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
#include "librbd/Types.h"
#include <map>
#include <set>
#include <string>

namespace librbd {

class ImageOptions;
class ProgressContext;

struct ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
struct Image {
  typedef std::map<std::string, std::string> ImageNameToIds;

  static int64_t get_data_pool_id(ImageCtxT *ictx);

  static int get_op_features(ImageCtxT *ictx, uint64_t *op_features);

  static int list_images(librados::IoCtx& io_ctx,
                         std::vector<image_spec_t> *images);
  static int list_images_v2(librados::IoCtx& io_ctx, ImageNameToIds *images);

  static int get_parent(ImageCtxT *ictx,
                        librbd::linked_image_spec_t *parent_image,
                        librbd::snap_spec_t *parent_snap);

  static int list_children(ImageCtxT *ictx,
                           std::vector<librbd::linked_image_spec_t> *images);
  static int list_children(ImageCtxT *ictx,
                           const cls::rbd::ParentImageSpec &parent_spec,
                           std::vector<librbd::linked_image_spec_t> *images);

  static int list_descendants(IoCtx& io_ctx, const std::string &image_id,
                              const std::optional<size_t> &max_level,
                              std::vector<librbd::linked_image_spec_t> *images);
  static int list_descendants(ImageCtxT *ictx,
                              const std::optional<size_t> &max_level,
                              std::vector<librbd::linked_image_spec_t> *images);
  static int list_descendants(ImageCtxT *ictx,
                              const cls::rbd::ParentImageSpec &parent_spec,
                              const std::optional<size_t> &max_level,
                              std::vector<librbd::linked_image_spec_t> *images);

  static int deep_copy(ImageCtxT *ictx, librados::IoCtx& dest_md_ctx,
                       const char *destname, ImageOptions& opts,
                       ProgressContext &prog_ctx);
  static int deep_copy(ImageCtxT *src, ImageCtxT *dest, bool flatten,
                       ProgressContext &prog_ctx);

  static int snap_set(ImageCtxT *ictx,
                      const cls::rbd::SnapshotNamespace &snap_namespace,
                      const char *snap_name);
  static int snap_set(ImageCtxT *ictx, uint64_t snap_id);

  static int remove(librados::IoCtx& io_ctx, const std::string &image_name,
                    ProgressContext& prog_ctx);

  static int flatten_children(ImageCtxT *ictx, const char* snap_name,
                              ProgressContext& pctx);

  static int encryption_format(ImageCtxT *ictx, encryption_format_t format,
                               encryption_options_t opts, size_t opts_size,
                               bool c_api);
  static int encryption_load(ImageCtxT *ictx, const encryption_spec_t *specs,
                             size_t spec_count, bool c_api);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Image<librbd::ImageCtx>;

#endif // LIBRBD_API_IMAGE_H
3,265
36.976744
93
h
null
ceph-main/src/librbd/api/Io.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef LIBRBD_API_IO_H
#define LIBRBD_API_IO_H

#include "include/int_types.h"
#include "librbd/io/ReadResult.h"

namespace librbd {

struct ImageCtx;
namespace io { struct AioCompletion; }

namespace api {

template<typename ImageCtxT = ImageCtx>
struct Io {
  static ssize_t read(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
                      io::ReadResult &&read_result, int op_flags);
  static ssize_t write(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
                       bufferlist &&bl, int op_flags);
  static ssize_t discard(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
                         uint32_t discard_granularity_bytes);
  static ssize_t write_same(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
                            bufferlist &&bl, int op_flags);
  static ssize_t write_zeroes(ImageCtxT &image_ctx, uint64_t off,
                              uint64_t len, int zero_flags, int op_flags);
  static ssize_t compare_and_write(ImageCtxT &image_ctx, uint64_t off,
                                   uint64_t len, bufferlist &&cmp_bl,
                                   bufferlist &&bl, uint64_t *mismatch_off,
                                   int op_flags);
  static int flush(ImageCtxT &image_ctx);

  static void aio_read(ImageCtxT &image_ctx, io::AioCompletion *c,
                       uint64_t off, uint64_t len,
                       io::ReadResult &&read_result, int op_flags,
                       bool native_async);
  static void aio_write(ImageCtxT &image_ctx, io::AioCompletion *c,
                        uint64_t off, uint64_t len, bufferlist &&bl,
                        int op_flags, bool native_async);
  static void aio_discard(ImageCtxT &image_ctx, io::AioCompletion *c,
                          uint64_t off, uint64_t len,
                          uint32_t discard_granularity_bytes,
                          bool native_async);
  static void aio_write_same(ImageCtxT &image_ctx, io::AioCompletion *c,
                             uint64_t off, uint64_t len, bufferlist &&bl,
                             int op_flags, bool native_async);
  static void aio_write_zeroes(ImageCtxT &image_ctx, io::AioCompletion *c,
                               uint64_t off, uint64_t len, int zero_flags,
                               int op_flags, bool native_async);
  static void aio_compare_and_write(ImageCtxT &image_ctx,
                                    io::AioCompletion *c, uint64_t off,
                                    uint64_t len, bufferlist &&cmp_bl,
                                    bufferlist &&bl, uint64_t *mismatch_off,
                                    int op_flags, bool native_async);
  static void aio_flush(ImageCtxT &image_ctx, io::AioCompletion *c,
                        bool native_async);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Io<librbd::ImageCtx>;

#endif // LIBRBD_API_IO_H
3,000
44.469697
80
h
null
ceph-main/src/librbd/api/Migration.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_MIGRATION_H
#define CEPH_LIBRBD_API_MIGRATION_H

#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"

#include <vector>

namespace librbd {

class ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
class Migration {
public:
  static int prepare(librados::IoCtx& io_ctx, const std::string &image_name,
                     librados::IoCtx& dest_io_ctx,
                     const std::string &dest_image_name, ImageOptions& opts);
  static int prepare_import(const std::string& source_spec,
                            librados::IoCtx& dest_io_ctx,
                            const std::string &dest_image_name,
                            ImageOptions& opts);
  static int execute(librados::IoCtx& io_ctx, const std::string &image_name,
                     ProgressContext &prog_ctx);
  static int abort(librados::IoCtx& io_ctx, const std::string &image_name,
                   ProgressContext &prog_ctx);
  static int commit(librados::IoCtx& io_ctx, const std::string &image_name,
                    ProgressContext &prog_ctx);
  static int status(librados::IoCtx& io_ctx, const std::string &image_name,
                    image_migration_status_t *status);
  static int get_source_spec(ImageCtxT* image_ctx, std::string* source_spec);

private:
  CephContext* m_cct;
  ImageCtx* m_src_image_ctx;
  ImageCtx* m_dst_image_ctx;
  librados::IoCtx m_dst_io_ctx;
  std::string m_dst_image_name;
  std::string m_dst_image_id;
  std::string m_dst_header_oid;
  ImageOptions &m_image_options;
  bool m_flatten;
  bool m_mirroring;
  cls::rbd::MirrorImageMode m_mirror_image_mode;
  ProgressContext *m_prog_ctx;

  cls::rbd::MigrationSpec m_src_migration_spec;
  cls::rbd::MigrationSpec m_dst_migration_spec;

  Migration(ImageCtx* src_image_ctx, ImageCtx* dst_image_ctx,
            const cls::rbd::MigrationSpec& dst_migration_spec,
            ImageOptions& opts, ProgressContext *prog_ctx);

  int prepare();
  int prepare_import();
  int execute();
  int abort();
  int commit();
  int status(image_migration_status_t *status);

  int set_state(ImageCtxT* image_ctx, const std::string& image_description,
                cls::rbd::MigrationState state,
                const std::string &description);
  int set_state(cls::rbd::MigrationState state,
                const std::string &description);

  int list_src_snaps(ImageCtxT* image_ctx,
                     std::vector<librbd::snap_info_t> *snaps);
  int validate_src_snaps(ImageCtxT* image_ctx);
  int disable_mirroring(ImageCtxT* image_ctx, bool *was_enabled,
                        cls::rbd::MirrorImageMode *mirror_image_mode);
  int enable_mirroring(ImageCtxT* image_ctx, bool was_enabled,
                       cls::rbd::MirrorImageMode mirror_image_mode);
  int set_src_migration(ImageCtxT* image_ctx);
  int unlink_src_image(ImageCtxT* image_ctx);
  int relink_src_image(ImageCtxT* image_ctx);
  int create_dst_image(ImageCtxT** image_ctx);
  int remove_group(ImageCtxT* image_ctx, group_info_t *group_info);
  int add_group(ImageCtxT* image_ctx, group_info_t &group_info);
  int update_group(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx);
  int remove_migration(ImageCtxT* image_ctx);
  int relink_children(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx);
  int remove_src_image(ImageCtxT** image_ctx);

  int v1_set_src_migration(ImageCtxT* image_ctx);
  int v2_set_src_migration(ImageCtxT* image_ctx);
  int v1_unlink_src_image(ImageCtxT* image_ctx);
  int v2_unlink_src_image(ImageCtxT* image_ctx);
  int v1_relink_src_image(ImageCtxT* image_ctx);
  int v2_relink_src_image(ImageCtxT* image_ctx);

  int relink_child(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx,
                   const librbd::snap_info_t &src_snap,
                   const librbd::linked_image_spec_t &child_image,
                   bool migration_abort, bool reattach_child);

  int revert_data(ImageCtxT* src_image_ctx, ImageCtxT* dst_image_ctx,
                  ProgressContext *prog_ctx);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Migration<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_MIGRATION_H
4,328
36.973684
80
h
null
ceph-main/src/librbd/api/Mirror.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef LIBRBD_API_MIRROR_H
#define LIBRBD_API_MIRROR_H

#include "include/rbd/librbd.hpp"
#include <map>
#include <string>
#include <vector>

struct Context;

namespace librbd {

struct ImageCtx;
namespace asio { struct ContextWQ; }

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
struct Mirror {
  typedef std::map<std::string, std::string> Attributes;
  typedef std::map<std::string, mirror_image_global_status_t>
      IdToMirrorImageGlobalStatus;
  typedef std::map<mirror_image_status_state_t, int> MirrorImageStatusStates;

  static int site_name_get(librados::Rados& rados, std::string* name);
  static int site_name_set(librados::Rados& rados, const std::string& name);

  static int mode_get(librados::IoCtx& io_ctx, rbd_mirror_mode_t *mirror_mode);
  static int mode_set(librados::IoCtx& io_ctx, rbd_mirror_mode_t mirror_mode);

  static int uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid);
  static void uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid,
                       Context* on_finish);

  static int peer_bootstrap_create(librados::IoCtx& io_ctx,
                                   std::string* token);
  static int peer_bootstrap_import(librados::IoCtx& io_ctx,
                                   rbd_mirror_peer_direction_t direction,
                                   const std::string& token);

  static int peer_site_add(librados::IoCtx& io_ctx, std::string *uuid,
                           mirror_peer_direction_t direction,
                           const std::string &site_name,
                           const std::string &client_name);
  static int peer_site_remove(librados::IoCtx& io_ctx,
                              const std::string &uuid);
  static int peer_site_list(librados::IoCtx& io_ctx,
                            std::vector<mirror_peer_site_t> *peers);
  static int peer_site_set_client(librados::IoCtx& io_ctx,
                                  const std::string &uuid,
                                  const std::string &client_name);
  static int peer_site_set_name(librados::IoCtx& io_ctx,
                                const std::string &uuid,
                                const std::string &site_name);
  static int peer_site_set_direction(librados::IoCtx& io_ctx,
                                     const std::string &uuid,
                                     mirror_peer_direction_t direction);
  static int peer_site_get_attributes(librados::IoCtx& io_ctx,
                                      const std::string &uuid,
                                      Attributes* attributes);
  static int peer_site_set_attributes(librados::IoCtx& io_ctx,
                                      const std::string &uuid,
                                      const Attributes& attributes);

  static int image_global_status_list(librados::IoCtx& io_ctx,
                                      const std::string &start_id, size_t max,
                                      IdToMirrorImageGlobalStatus *images);

  static int image_status_summary(librados::IoCtx& io_ctx,
                                  MirrorImageStatusStates *states);
  static int image_instance_id_list(librados::IoCtx& io_ctx,
                                    const std::string &start_image_id,
                                    size_t max,
                                    std::map<std::string, std::string> *ids);

  static int image_info_list(
      librados::IoCtx& io_ctx, mirror_image_mode_t *mode_filter,
      const std::string &start_id, size_t max,
      std::map<std::string, std::pair<mirror_image_mode_t,
                                      mirror_image_info_t>> *entries);

  static int image_enable(ImageCtxT *ictx, mirror_image_mode_t mode,
                          bool relax_same_pool_parent_check);
  static int image_disable(ImageCtxT *ictx, bool force);

  static int image_promote(ImageCtxT *ictx, bool force);
  static void image_promote(ImageCtxT *ictx, bool force, Context *on_finish);
  static int image_demote(ImageCtxT *ictx);
  static void image_demote(ImageCtxT *ictx, Context *on_finish);

  static int image_resync(ImageCtxT *ictx);

  static int image_get_info(ImageCtxT *ictx,
                            mirror_image_info_t *mirror_image_info);
  static void image_get_info(ImageCtxT *ictx,
                             mirror_image_info_t *mirror_image_info,
                             Context *on_finish);
  static int image_get_info(librados::IoCtx& io_ctx,
                            asio::ContextWQ *op_work_queue,
                            const std::string &image_id,
                            mirror_image_info_t *mirror_image_info);
  static void image_get_info(librados::IoCtx& io_ctx,
                             asio::ContextWQ *op_work_queue,
                             const std::string &image_id,
                             mirror_image_info_t *mirror_image_info,
                             Context *on_finish);

  static int image_get_mode(ImageCtxT *ictx, mirror_image_mode_t *mode);
  static void image_get_mode(ImageCtxT *ictx, mirror_image_mode_t *mode,
                             Context *on_finish);

  static int image_get_global_status(ImageCtxT *ictx,
                                     mirror_image_global_status_t *status);
  static void image_get_global_status(ImageCtxT *ictx,
                                      mirror_image_global_status_t *status,
                                      Context *on_finish);
  static int image_get_instance_id(ImageCtxT *ictx, std::string *instance_id);

  static int image_snapshot_create(ImageCtxT *ictx, uint32_t flags,
                                   uint64_t *snap_id);
  static void image_snapshot_create(ImageCtxT *ictx, uint32_t flags,
                                    uint64_t *snap_id, Context *on_finish);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Mirror<librbd::ImageCtx>;

#endif // LIBRBD_API_MIRROR_H
5,990
46.173228
80
h
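A minimal sketch of the pool-level entry points that the Mirror template above
implements, via the public librbd::RBD wrapper (assuming an open
librados::IoCtx for the pool to be mirrored):

#include <rbd/librbd.hpp>

// Enables pool-mode mirroring and reads the mode back to verify.
int enable_pool_mirroring(librados::IoCtx &io_ctx) {
  librbd::RBD rbd;
  int r = rbd.mirror_mode_set(io_ctx, RBD_MIRROR_MODE_POOL);
  if (r < 0) {
    return r;
  }
  rbd_mirror_mode_t mode;
  return rbd.mirror_mode_get(io_ctx, &mode);
}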
null
ceph-main/src/librbd/api/Namespace.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_NAMESPACE_H
#define CEPH_LIBRBD_API_NAMESPACE_H

#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include <string>
#include <vector>

namespace librbd {

struct ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
struct Namespace {
  static int create(librados::IoCtx& io_ctx, const std::string& name);
  static int remove(librados::IoCtx& io_ctx, const std::string& name);
  static int list(librados::IoCtx& io_ctx, std::vector<std::string>* names);
  static int exists(librados::IoCtx& io_ctx, const std::string& name,
                    bool *exists);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Namespace<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_NAMESPACE_H
863
24.411765
84
h
null
ceph-main/src/librbd/api/Pool.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_POOL_H
#define CEPH_LIBRBD_API_POOL_H

#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.h"
#include <map>

namespace librbd {

struct ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
class Pool {
public:
  typedef std::map<rbd_pool_stat_option_t, uint64_t*> StatOptions;

  static int init(librados::IoCtx& io_ctx, bool force);

  static int add_stat_option(StatOptions* stat_options,
                             rbd_pool_stat_option_t option, uint64_t* value);

  static int get_stats(librados::IoCtx& io_ctx, StatOptions* stat_options);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Pool<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_POOL_H
913
22.435897
75
h
null
ceph-main/src/librbd/api/PoolMetadata.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_POOL_METADATA_H
#define CEPH_LIBRBD_API_POOL_METADATA_H

#include "include/buffer_fwd.h"
#include "include/rados/librados_fwd.hpp"

#include <cstdint>
#include <map>
#include <string>

namespace librbd {

class ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
class PoolMetadata {
public:
  static int get(librados::IoCtx& io_ctx, const std::string &key,
                 std::string *value);
  static int set(librados::IoCtx& io_ctx, const std::string &key,
                 const std::string &value);
  static int remove(librados::IoCtx& io_ctx, const std::string &key);
  static int list(librados::IoCtx& io_ctx, const std::string &start,
                  uint64_t max,
                  std::map<std::string, ceph::bufferlist> *pairs);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::PoolMetadata<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_POOL_METADATA_H
1,030
26.131579
80
h
null
ceph-main/src/librbd/api/Snapshot.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_SNAPSHOT_H
#define CEPH_LIBRBD_API_SNAPSHOT_H

#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>

namespace librbd {

struct ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
struct Snapshot {

  static int get_group_namespace(ImageCtxT *ictx, uint64_t snap_id,
                                 snap_group_namespace_t *group_snap);

  static int get_trash_namespace(ImageCtxT *ictx, uint64_t snap_id,
                                 std::string *original_name);

  static int get_mirror_namespace(
      ImageCtxT *ictx, uint64_t snap_id,
      snap_mirror_namespace_t *mirror_snap);

  static int get_namespace_type(ImageCtxT *ictx, uint64_t snap_id,
                                snap_namespace_type_t *namespace_type);

  static int remove(ImageCtxT *ictx, uint64_t snap_id);

  static int get_name(ImageCtxT *ictx, uint64_t snap_id,
                      std::string *snap_name);

  static int get_id(ImageCtxT *ictx, const std::string& snap_name,
                    uint64_t *snap_id);

  static int list(ImageCtxT *ictx, std::vector<snap_info_t>& snaps);

  static int exists(ImageCtxT *ictx,
                    const cls::rbd::SnapshotNamespace& snap_namespace,
                    const char *snap_name, bool *exists);

  static int create(ImageCtxT *ictx, const char *snap_name, uint32_t flags,
                    ProgressContext& pctx);

  static int remove(ImageCtxT *ictx, const char *snap_name, uint32_t flags,
                    ProgressContext& pctx);

  static int get_limit(ImageCtxT *ictx, uint64_t *limit);

  static int set_limit(ImageCtxT *ictx, uint64_t limit);

  static int get_timestamp(ImageCtxT *ictx, uint64_t snap_id,
                           struct timespec *timestamp);

  static int is_protected(ImageCtxT *ictx, const char *snap_name,
                          bool *protect);

  static int get_namespace(ImageCtxT *ictx, const char *snap_name,
                           cls::rbd::SnapshotNamespace *snap_namespace);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Snapshot<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_API_SNAPSHOT_H
2,125
30.264706
99
h
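A minimal sketch of the public snapshot calls that the Snapshot template above
services (assuming an open librbd::Image; the snapshot name is hypothetical):

#include <rbd/librbd.hpp>
#include <vector>

// Creates a snapshot, then enumerates all snapshots of the image.
int snapshot_and_list(librbd::Image &image) {
  int r = image.snap_create("before_upgrade");
  if (r < 0) {
    return r;
  }
  std::vector<librbd::snap_info_t> snaps;
  return image.snap_list(snaps);
}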
null
ceph-main/src/librbd/api/Trash.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef LIBRBD_API_TRASH_H
#define LIBRBD_API_TRASH_H

#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <set>
#include <string>
#include <vector>

namespace librbd {

class ProgressContext;

struct ImageCtx;

namespace api {

template <typename ImageCtxT = librbd::ImageCtx>
struct Trash {
  typedef std::set<cls::rbd::TrashImageSource> TrashImageSources;
  static const TrashImageSources ALLOWED_RESTORE_SOURCES;

  static int move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
                  const std::string &image_name, uint64_t delay);
  static int move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
                  const std::string &image_name, const std::string &image_id,
                  uint64_t delay);
  static int get(librados::IoCtx &io_ctx, const std::string &id,
                 trash_image_info_t *info);
  static int list(librados::IoCtx &io_ctx,
                  std::vector<trash_image_info_t> &entries,
                  bool exclude_user_remove_source);
  static int purge(IoCtx& io_ctx, time_t expire_ts,
                   float threshold, ProgressContext& pctx);
  static int remove(librados::IoCtx &io_ctx, const std::string &image_id,
                    bool force, ProgressContext& prog_ctx);
  static int restore(librados::IoCtx &io_ctx,
                     const TrashImageSources& trash_image_sources,
                     const std::string &image_id,
                     const std::string &image_new_name);
};

} // namespace api
} // namespace librbd

extern template class librbd::api::Trash<librbd::ImageCtx>;

#endif // LIBRBD_API_TRASH_H
1,783
32.037037
77
h
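The Trash operations above back the public trash_* calls; a short sketch
(assuming an open librados::IoCtx; the image name is hypothetical):

#include <rbd/librbd.hpp>

// Soft-deletes an image: it stays recoverable in the trash for the
// given delay (seconds) before becoming eligible for purge.
int soft_delete_image(librados::IoCtx &io_ctx) {
  librbd::RBD rbd;
  return rbd.trash_move(io_ctx, "image1", 3600);
}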
null
ceph-main/src/librbd/api/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_API_UTILS_H
#define CEPH_LIBRBD_API_UTILS_H

#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/EncryptionFormat.h"

namespace librbd {

struct ImageCtx;

namespace api {
namespace util {

template <typename ImageCtxT = librbd::ImageCtx>
int create_encryption_format(
    CephContext* cct, encryption_format_t format, encryption_options_t opts,
    size_t opts_size, bool c_api,
    crypto::EncryptionFormat<ImageCtxT>** result_format);

} // namespace util
} // namespace api
} // namespace librbd

#endif // CEPH_LIBRBD_API_UTILS_H
701
23.206897
70
h
null
ceph-main/src/librbd/asio/ContextWQ.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
#define CEPH_LIBRBD_ASIO_CONTEXT_WQ_H

#include "include/common_fwd.h"
#include "include/Context.h"
#include <atomic>
#include <memory>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>

namespace librbd {
namespace asio {

class ContextWQ {
public:
  explicit ContextWQ(CephContext* cct, boost::asio::io_context& io_context);
  ~ContextWQ();

  void drain();

  void queue(Context *ctx, int r = 0) {
    ++m_queued_ops;

    // ensure all legacy ContextWQ users are dispatched sequentially for
    // backwards compatibility (i.e. might not be concurrent thread-safe)
    boost::asio::post(*m_strand, [this, ctx, r]() {
      ctx->complete(r);

      ceph_assert(m_queued_ops > 0);
      --m_queued_ops;
    });
  }

private:
  CephContext* m_cct;
  boost::asio::io_context& m_io_context;
  std::unique_ptr<boost::asio::io_context::strand> m_strand;

  std::atomic<uint64_t> m_queued_ops;

  void drain_handler(Context* ctx);
};

} // namespace asio
} // namespace librbd

#endif // CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
1,214
21.924528
76
h
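ContextWQ::queue() above leans on Boost.Asio's strand guarantee: handlers
posted through one strand run sequentially in FIFO order even with many worker
threads. A self-contained demonstration of that property (plain Boost.Asio,
independent of librbd):

#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>
#include <iostream>
#include <thread>
#include <vector>

int main() {
  boost::asio::io_context io_context;
  boost::asio::io_context::strand strand(io_context);

  // All five handlers print in order, never concurrently, despite the
  // four competing worker threads below.
  for (int i = 0; i < 5; ++i) {
    boost::asio::post(strand, [i]() { std::cout << "op " << i << '\n'; });
  }

  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t) {
    workers.emplace_back([&io_context]() { io_context.run(); });
  }
  for (auto &w : workers) {
    w.join();
  }
  return 0;
}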
null
ceph-main/src/librbd/asio/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_ASIO_UTILS_H
#define CEPH_LIBRBD_ASIO_UTILS_H

#include "include/Context.h"
#include "include/rados/librados_fwd.hpp"
#include <boost/system/error_code.hpp>

namespace librbd {
namespace asio {
namespace util {

template <typename T>
auto get_context_adapter(T&& t) {
  return [t = std::move(t)](boost::system::error_code ec) {
    t->complete(-ec.value());
  };
}

template <typename T>
auto get_callback_adapter(T&& t) {
  return [t = std::move(t)](boost::system::error_code ec, auto&& ... args) {
    t(-ec.value(), std::forward<decltype(args)>(args)...);
  };
}

} // namespace util
} // namespace asio
} // namespace librbd

#endif // CEPH_LIBRBD_ASIO_UTILS_H
792
22.323529
76
h
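The adapters above translate Boost.Asio's boost::system::error_code into the
negative-errno convention used by librbd callbacks. A standalone sketch of the
same idea (make_errno_adapter is a hypothetical name for illustration):

#include <boost/system/error_code.hpp>
#include <iostream>
#include <utility>

// Wraps a callback expecting a negative errno so it can be handed to an
// Asio-style completion that reports an error_code.
template <typename T>
auto make_errno_adapter(T&& t) {
  return [t = std::move(t)](boost::system::error_code ec) {
    t(-ec.value());
  };
}

int main() {
  auto cb = make_errno_adapter([](int r) { std::cout << "r=" << r << '\n'; });
  cb(boost::system::errc::make_error_code(
      boost::system::errc::no_such_file_or_directory));  // prints r=-2
  return 0;
}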
null
ceph-main/src/librbd/cache/ImageWriteback.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
#define CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK

#include "include/buffer_fwd.h"
#include "include/int_types.h"
#include "librbd/io/Types.h"
#include <vector>

class Context;

namespace librbd {

struct ImageCtx;

namespace cache {

class ImageWritebackInterface {
public:
  typedef std::vector<std::pair<uint64_t,uint64_t> > Extents;

  virtual ~ImageWritebackInterface() {
  }

  virtual void aio_read(Extents &&image_extents, ceph::bufferlist *bl,
                        int fadvise_flags, Context *on_finish) = 0;
  virtual void aio_write(Extents &&image_extents, ceph::bufferlist&& bl,
                         int fadvise_flags, Context *on_finish) = 0;
  virtual void aio_discard(uint64_t offset, uint64_t length,
                           uint32_t discard_granularity_bytes,
                           Context *on_finish) = 0;
  virtual void aio_flush(io::FlushSource flush_source,
                         Context *on_finish) = 0;
  virtual void aio_writesame(uint64_t offset, uint64_t length,
                             ceph::bufferlist&& bl,
                             int fadvise_flags, Context *on_finish) = 0;
  virtual void aio_compare_and_write(Extents &&image_extents,
                                     ceph::bufferlist&& cmp_bl,
                                     ceph::bufferlist&& bl,
                                     uint64_t *mismatch_offset,
                                     int fadvise_flags,
                                     Context *on_finish) = 0;
};

/**
 * client-side, image extent cache writeback handler
 */
template <typename ImageCtxT = librbd::ImageCtx>
class ImageWriteback : public ImageWritebackInterface {
public:
  using ImageWritebackInterface::Extents;

  explicit ImageWriteback(ImageCtxT &image_ctx);

  void aio_read(Extents &&image_extents, ceph::bufferlist *bl,
                int fadvise_flags, Context *on_finish);
  void aio_write(Extents &&image_extents, ceph::bufferlist&& bl,
                 int fadvise_flags, Context *on_finish);
  void aio_discard(uint64_t offset, uint64_t length,
                   uint32_t discard_granularity_bytes, Context *on_finish);
  void aio_flush(io::FlushSource flush_source, Context *on_finish);
  void aio_writesame(uint64_t offset, uint64_t length,
                     ceph::bufferlist&& bl,
                     int fadvise_flags, Context *on_finish);
  void aio_compare_and_write(Extents &&image_extents,
                             ceph::bufferlist&& cmp_bl,
                             ceph::bufferlist&& bl,
                             uint64_t *mismatch_offset,
                             int fadvise_flags, Context *on_finish);

private:
  ImageCtxT &m_image_ctx;
};

} // namespace cache
} // namespace librbd

extern template class librbd::cache::ImageWriteback<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
2,899
36.179487
87
h
null
ceph-main/src/librbd/cache/ObjectCacherObjectDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H

#include "librbd/io/ObjectDispatchInterface.h"
#include "common/ceph_mutex.h"
#include "osdc/ObjectCacher.h"

struct WritebackHandler;

namespace librbd {

class ImageCtx;

namespace cache {

/**
 * Facade around the OSDC object cacher to make it align with
 * the object dispatcher interface
 */
template <typename ImageCtxT = ImageCtx>
class ObjectCacherObjectDispatch : public io::ObjectDispatchInterface {
public:
  static ObjectCacherObjectDispatch* create(ImageCtxT* image_ctx,
                                            size_t max_dirty,
                                            bool writethrough_until_flush) {
    return new ObjectCacherObjectDispatch(image_ctx, max_dirty,
                                          writethrough_until_flush);
  }

  ObjectCacherObjectDispatch(ImageCtxT* image_ctx, size_t max_dirty,
                             bool writethrough_until_flush);
  ~ObjectCacherObjectDispatch() override;

  io::ObjectDispatchLayer get_dispatch_layer() const override {
    return io::OBJECT_DISPATCH_LAYER_CACHE;
  }

  void init();
  void shut_down(Context* on_finish) override;

  bool read(
      uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool flush(
      io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool list_snaps(
      uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override;
  bool reset_existence_cache(Context* on_finish) override;

  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) {
  }

  int prepare_copyup(
      uint64_t object_no,
      io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
    return 0;
  }

private:
  struct C_InvalidateCache;

  ImageCtxT* m_image_ctx;
  size_t m_max_dirty;
  bool m_writethrough_until_flush;

  ceph::mutex m_cache_lock;
  ObjectCacher *m_object_cacher = nullptr;
  ObjectCacher::ObjectSet *m_object_set = nullptr;

  WritebackHandler *m_writeback_handler = nullptr;

  bool m_user_flushed = false;
};

} // namespace cache
} // namespace librbd

extern template class librbd::cache::ObjectCacherObjectDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
4,671
34.12782
82
h
null
ceph-main/src/librbd/cache/ObjectCacherWriteback.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H

#include "common/snap_types.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <queue>

class Context;

namespace librbd {

struct ImageCtx;

namespace cache {

class ObjectCacherWriteback : public WritebackHandler {
public:
  static const int READ_FLAGS_MASK  = 0xF000;
  static const int READ_FLAGS_SHIFT = 24;

  ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock);

  // Note that oloc, trunc_size, and trunc_seq are ignored
  void read(const object_t& oid, uint64_t object_no,
            const object_locator_t& oloc, uint64_t off, uint64_t len,
            snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
            __u32 trunc_seq, int op_flags,
            const ZTracer::Trace &parent_trace, Context *onfinish) override;

  // Determine whether a read to this extent could be affected by a
  // write-triggered copy-on-write
  bool may_copy_on_write(const object_t& oid, uint64_t read_off,
                         uint64_t read_len, snapid_t snapid) override;

  // Note that oloc, trunc_size, and trunc_seq are ignored
  ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
                   uint64_t off, uint64_t len,
                   const SnapContext& snapc, const bufferlist &bl,
                   ceph::real_time mtime, uint64_t trunc_size,
                   __u32 trunc_seq, ceph_tid_t journal_tid,
                   const ZTracer::Trace &parent_trace,
                   Context *oncommit) override;
  using WritebackHandler::write;

  void overwrite_extent(const object_t& oid, uint64_t off,
                        uint64_t len, ceph_tid_t original_journal_tid,
                        ceph_tid_t new_journal_tid) override;

  struct write_result_d {
    bool done;
    int ret;
    std::string oid;
    Context *oncommit;
    write_result_d(const std::string& oid, Context *oncommit)
      : done(false), ret(0), oid(oid), oncommit(oncommit) {}
  private:
    write_result_d(const write_result_d& rhs);
    const write_result_d& operator=(const write_result_d& rhs);
  };

private:
  void complete_writes(const std::string& oid);

  ceph_tid_t m_tid;
  ceph::mutex& m_lock;
  librbd::ImageCtx *m_ictx;
  ceph::unordered_map<std::string, std::queue<write_result_d*> > m_writes;
  friend class C_OrderedWrite;
};

} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
2,594
31.848101
76
h
null
ceph-main/src/librbd/cache/ParentCacheObjectDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H

#include "librbd/io/ObjectDispatchInterface.h"
#include "common/ceph_mutex.h"
#include "librbd/cache/TypeTraits.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/Types.h"

namespace librbd {

class ImageCtx;

namespace plugin { template <typename> struct Api; }

namespace cache {

template <typename ImageCtxT = ImageCtx>
class ParentCacheObjectDispatch : public io::ObjectDispatchInterface {
  // mock unit testing support
  typedef cache::TypeTraits<ImageCtxT> TypeTraits;
  typedef typename TypeTraits::CacheClient CacheClient;

public:
  static ParentCacheObjectDispatch* create(
      ImageCtxT* image_ctx, plugin::Api<ImageCtxT>& plugin_api) {
    return new ParentCacheObjectDispatch(image_ctx, plugin_api);
  }

  ParentCacheObjectDispatch(ImageCtxT* image_ctx,
                            plugin::Api<ImageCtxT>& plugin_api);
  ~ParentCacheObjectDispatch() override;

  io::ObjectDispatchLayer get_dispatch_layer() const override {
    return io::OBJECT_DISPATCH_LAYER_PARENT_CACHE;
  }

  void init(Context* on_finish = nullptr);
  void shut_down(Context* on_finish) {
    m_image_ctx->op_work_queue->queue(on_finish, 0);
  }

  bool read(
      uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) {
    return false;
  }

  bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) {
    return false;
  }

  bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) {
    return false;
  }

  bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) {
    return false;
  }

  bool flush(
      io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_id, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) {
    return false;
  }

  bool list_snaps(
      uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) {
    return false;
  }

  bool reset_existence_cache(Context* on_finish) {
    return false;
  }

  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) {
  }

  int prepare_copyup(
      uint64_t object_no,
      io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
    return 0;
  }

  ImageCtxT* get_image_ctx() {
    return m_image_ctx;
  }

  CacheClient* get_cache_client() {
    return m_cache_client;
  }

private:
  int read_object(std::string file_path, ceph::bufferlist* read_data,
                  uint64_t offset, uint64_t length, Context *on_finish);
  void handle_read_cache(
      ceph::immutable_obj_cache::ObjectCacheRequest* ack, uint64_t object_no,
      io::ReadExtents* extents, IOContext io_context, int read_flags,
      const ZTracer::Trace &parent_trace,
      io::DispatchResult* dispatch_result, Context* on_dispatched);
  int handle_register_client(bool reg);
  void create_cache_session(Context* on_finish, bool is_reconnect);

  ImageCtxT* m_image_ctx;
  plugin::Api<ImageCtxT>& m_plugin_api;

  ceph::mutex m_lock;
  CacheClient *m_cache_client = nullptr;
  bool m_connecting = false;
};

} // namespace cache
} // namespace librbd

extern template class librbd::cache::ParentCacheObjectDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
5,482
32.845679
81
h
null
ceph-main/src/librbd/cache/WriteAroundObjectDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_WRITE_AROUND_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_WRITE_AROUND_OBJECT_DISPATCH_H

#include "librbd/io/ObjectDispatchInterface.h"
#include "include/interval_set.h"
#include "common/ceph_mutex.h"
#include "librbd/io/Types.h"
#include <map>
#include <set>
#include <string>

struct Context;

namespace librbd {

struct ImageCtx;

namespace cache {

template <typename ImageCtxT = ImageCtx>
class WriteAroundObjectDispatch : public io::ObjectDispatchInterface {
public:
  static WriteAroundObjectDispatch* create(ImageCtxT* image_ctx,
                                           size_t max_dirty,
                                           bool writethrough_until_flush) {
    return new WriteAroundObjectDispatch(image_ctx, max_dirty,
                                         writethrough_until_flush);
  }

  WriteAroundObjectDispatch(ImageCtxT* image_ctx, size_t max_dirty,
                            bool writethrough_until_flush);
  ~WriteAroundObjectDispatch() override;

  io::ObjectDispatchLayer get_dispatch_layer() const override {
    return io::OBJECT_DISPATCH_LAYER_CACHE;
  }

  void init();
  void shut_down(Context* on_finish) override;

  bool read(
      uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context**on_finish, Context* on_dispatched) override;

  bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context**on_finish, Context* on_dispatched) override;

  bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context**on_finish, Context* on_dispatched) override;

  bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool flush(
      io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool list_snaps(
      uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

  bool reset_existence_cache(Context* on_finish) override {
    return false;
  }

  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) override {
  }

  int prepare_copyup(
      uint64_t object_no,
      io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
    return 0;
  }

private:
  struct QueuedIO {
    QueuedIO(uint64_t length, Context* on_finish, Context* on_dispatched)
      : length(length), on_finish(on_finish), on_dispatched(on_dispatched) {
    }

    uint64_t length;
    Context* on_finish;
    Context* on_dispatched;
  };

  struct QueuedFlush {
    QueuedFlush(Context* on_finish, Context* on_dispatched)
      : on_finish(on_finish), on_dispatched(on_dispatched) {
    }

    Context* on_finish;
    Context* on_dispatched;
  };

  struct BlockedIO : public QueuedIO {
    BlockedIO(uint64_t offset, uint64_t length, Context* on_finish,
              Context* on_dispatched)
      : QueuedIO(length, on_finish, on_dispatched), offset(offset) {
    }

    uint64_t offset;
  };

  typedef std::map<uint64_t, QueuedIO> QueuedIOs;
  typedef std::map<uint64_t, QueuedFlush> QueuedFlushes;

  typedef std::map<uint64_t, BlockedIO> BlockedObjectIOs;
  typedef std::map<uint64_t, BlockedObjectIOs> BlockedIOs;

  typedef std::map<uint64_t, Context*> Contexts;
  typedef std::set<uint64_t> Tids;

  typedef interval_set<uint64_t> InFlightObjectExtents;
  typedef std::map<uint64_t, InFlightObjectExtents> InFlightExtents;

  ImageCtxT* m_image_ctx;
  size_t m_init_max_dirty;
  size_t m_max_dirty;

  ceph::mutex m_lock;
  bool m_user_flushed = false;

  uint64_t m_last_tid = 0;
  uint64_t m_in_flight_bytes = 0;

  Tids m_in_flight_io_tids;
  InFlightExtents m_in_flight_extents;

  BlockedIOs m_blocked_ios;
  QueuedIOs m_queued_ios;
  Tids m_queued_or_blocked_io_tids;

  BlockedIOs m_blocked_unoptimized_ios;

  QueuedFlushes m_queued_flushes;
  Contexts m_in_flight_flushes;
  Contexts m_pending_flushes;
  int m_pending_flush_error = 0;

  bool dispatch_unoptimized_io(uint64_t object_no, uint64_t object_off,
                               uint64_t object_len,
                               io::DispatchResult* dispatch_result,
                               Context* on_dispatched);
  bool dispatch_io(uint64_t object_no, uint64_t object_off,
                   uint64_t object_len, int op_flags,
                   io::DispatchResult* dispatch_result, Context** on_finish,
                   Context* on_dispatch);

  bool block_overlapping_io(InFlightObjectExtents* in_flight_object_extents,
                            uint64_t object_off, uint64_t object_len);
  void unblock_overlapping_ios(uint64_t object_no, uint64_t object_off,
                               uint64_t object_len,
                               Contexts* unoptimized_io_dispatches);

  bool can_dispatch_io(uint64_t tid, uint64_t length);

  void handle_in_flight_io_complete(int r, uint64_t tid, uint64_t object_no,
                                    uint64_t object_off, uint64_t object_len);
  void handle_in_flight_flush_complete(int r, uint64_t tid);

  QueuedIOs collect_ready_ios();
  Contexts collect_ready_flushes();
  Contexts collect_finished_flushes();
};

} // namespace cache
} // namespace librbd

extern template class librbd::cache::WriteAroundObjectDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_WRITE_AROUND_OBJECT_DISPATCH_H
7,237
32.981221
81
h
null
ceph-main/src/librbd/cache/WriteLogImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_WRITELOG_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_WRITELOG_IMAGE_DISPATCH_H

#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/zipkin_trace.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include "librbd/plugin/Api.h"

struct Context;

namespace librbd {

struct ImageCtx;

namespace cache {

namespace pwl { template <typename> class AbstractWriteLog; }

template <typename ImageCtxT>
class WriteLogImageDispatch : public io::ImageDispatchInterface {
public:
  WriteLogImageDispatch(ImageCtxT* image_ctx,
                        pwl::AbstractWriteLog<ImageCtx> *image_cache,
                        plugin::Api<ImageCtxT>& plugin_api)
    : m_image_ctx(image_ctx), m_image_cache(image_cache),
      m_plugin_api(plugin_api) {
  }

  io::ImageDispatchLayer get_dispatch_layer() const override {
    return io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE;
  }

  void shut_down(Context* on_finish) override;

  bool read(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      io::ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool discard(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      uint32_t discard_granularity_bytes,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write_same(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool compare_and_write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool flush(
      io::AioCompletion* aio_comp, io::FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool list_snaps(
      io::AioCompletion* aio_comp, io::Extents&& image_extents,
      io::SnapIds&& snap_ids, int list_snaps_flags,
      io::SnapshotDelta* snapshot_delta,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool invalidate_cache(Context* on_finish) override;

private:
  ImageCtxT* m_image_ctx;
  pwl::AbstractWriteLog<ImageCtx> *m_image_cache;
  plugin::Api<ImageCtxT>& m_plugin_api;

  bool preprocess_length(
      io::AioCompletion* aio_comp, io::Extents &image_extents) const;
};

} // namespace cache
} // namespace librbd

extern template class librbd::cache::WriteLogImageDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_WRITELOG_IMAGE_DISPATCH_H
3,942
36.198113
80
h
null
ceph-main/src/librbd/cache/pwl/AbstractWriteLog.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PARENT_WRITE_LOG
#define CEPH_LIBRBD_CACHE_PARENT_WRITE_LOG

#include "common/Timer.h"
#include "common/RWLock.h"
#include "common/WorkQueue.h"
#include "common/AsyncOpTracker.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/Utils.h"
#include "librbd/BlockGuard.h"
#include "librbd/cache/Types.h"
#include "librbd/cache/pwl/LogOperation.h"
#include "librbd/cache/pwl/ReadRequest.h"
#include "librbd/cache/pwl/Request.h"
#include "librbd/cache/pwl/LogMap.h"
#include "librbd/cache/pwl/Builder.h"
#include <functional>
#include <list>

class Context;

namespace librbd {

struct ImageCtx;

namespace plugin { template <typename> struct Api; }

namespace cache {
namespace pwl {

class GenericLogEntry;
class GenericWriteLogEntry;
class SyncPointLogEntry;
class WriteLogEntry;
struct WriteLogCacheEntry;

typedef std::list<std::shared_ptr<WriteLogEntry>> WriteLogEntries;
typedef std::list<std::shared_ptr<GenericLogEntry>> GenericLogEntries;
typedef std::list<std::shared_ptr<GenericWriteLogEntry>> GenericWriteLogEntries;
typedef std::vector<std::shared_ptr<GenericLogEntry>> GenericLogEntriesVector;

typedef LogMapEntries<GenericWriteLogEntry> WriteLogMapEntries;
typedef LogMap<GenericWriteLogEntry> WriteLogMap;

/**** Write log entries end ****/

typedef librbd::BlockGuard<GuardedRequest> WriteLogGuard;

class DeferredContexts;
template <typename> class ImageCacheState;

template<typename T>
class Builder;

template <typename T>
struct C_BlockIORequest;

template <typename T>
struct C_WriteRequest;

using GenericLogOperations = std::list<GenericLogOperationSharedPtr>;

template <typename ImageCtxT>
class AbstractWriteLog {
public:
  typedef io::Extent Extent;
  typedef io::Extents Extents;
  using This = AbstractWriteLog<ImageCtxT>;
  Builder<This> *m_builder;

  AbstractWriteLog(ImageCtxT &image_ctx,
                   librbd::cache::pwl::ImageCacheState<ImageCtxT>* cache_state,
                   Builder<This> *builder,
                   cache::ImageWritebackInterface& image_writeback,
                   plugin::Api<ImageCtxT>& plugin_api);
  virtual ~AbstractWriteLog();
  AbstractWriteLog(const AbstractWriteLog&) = delete;
  AbstractWriteLog &operator=(const AbstractWriteLog&) = delete;

  /// IO methods
  void read(
      Extents&& image_extents, ceph::bufferlist *bl,
      int fadvise_flags, Context *on_finish);
  void write(
      Extents&& image_extents, ceph::bufferlist&& bl,
      int fadvise_flags, Context *on_finish);
  void discard(
      uint64_t offset, uint64_t length,
      uint32_t discard_granularity_bytes,
      Context *on_finish);
  void flush(
      io::FlushSource flush_source, Context *on_finish);
  void writesame(
      uint64_t offset, uint64_t length,
      ceph::bufferlist&& bl,
      int fadvise_flags, Context *on_finish);
  void compare_and_write(
      Extents&& image_extents,
      ceph::bufferlist&& cmp_bl, ceph::bufferlist&& bl,
      uint64_t *mismatch_offset, int fadvise_flags,
      Context *on_finish);

  /// internal state methods
  void init(Context *on_finish);
  void shut_down(Context *on_finish);
  void invalidate(Context *on_finish);
  void flush(Context *on_finish);

  using C_WriteRequestT = pwl::C_WriteRequest<This>;
  using C_BlockIORequestT = pwl::C_BlockIORequest<This>;
  using C_FlushRequestT = pwl::C_FlushRequest<This>;
  using C_DiscardRequestT = pwl::C_DiscardRequest<This>;
  using C_WriteSameRequestT = pwl::C_WriteSameRequest<This>;

  CephContext * get_context();
  void release_guarded_request(BlockGuardCell *cell);
  void release_write_lanes(C_BlockIORequestT *req);
  virtual bool alloc_resources(C_BlockIORequestT *req) = 0;
  virtual void setup_schedule_append(
      pwl::GenericLogOperationsVector &ops, bool do_early_flush,
      C_BlockIORequestT *req) = 0;
  void schedule_append(pwl::GenericLogOperationsVector &ops,
                       C_BlockIORequestT *req = nullptr);
  void schedule_append(pwl::GenericLogOperationSharedPtr op,
                       C_BlockIORequestT *req = nullptr);
  void flush_new_sync_point(C_FlushRequestT *flush_req,
                            pwl::DeferredContexts &later);

  std::shared_ptr<pwl::SyncPoint> get_current_sync_point() {
    return m_current_sync_point;
  }
  bool get_persist_on_flush() {
    return m_persist_on_flush;
  }
  void inc_last_op_sequence_num() {
    m_perfcounter->inc(l_librbd_pwl_log_ops, 1);
    ++m_last_op_sequence_num;
  }
  uint64_t get_last_op_sequence_num() {
    return m_last_op_sequence_num;
  }
  uint64_t get_current_sync_gen() {
    return m_current_sync_gen;
  }
  unsigned int get_free_lanes() {
    return m_free_lanes;
  }
  uint32_t get_free_log_entries() {
    return m_free_log_entries;
  }
  void add_into_log_map(pwl::GenericWriteLogEntries &log_entries,
                        C_BlockIORequestT *req);
  virtual void complete_user_request(Context *&user_req, int r) = 0;
  virtual void copy_bl_to_buffer(
      WriteRequestResources *resources,
      std::unique_ptr<WriteLogOperationSet> &op_set) {}

private:
  typedef std::list<pwl::C_WriteRequest<This> *> C_WriteRequests;
  typedef std::list<pwl::C_BlockIORequest<This> *> C_BlockIORequests;

  std::atomic<bool> m_initialized = {false};

  uint64_t m_bytes_dirty = 0;  /* Total bytes yet to flush to RBD */
  utime_t m_last_alloc_fail;   /* Entry or buffer allocation fail seen */

  pwl::WriteLogGuard m_write_log_guard;

  /* Starts at 0 for a new write log. Incremented on every flush. */
  uint64_t m_current_sync_gen = 0;
  /* Starts at 0 on each sync gen increase. Incremented before applied
     to an operation */
  uint64_t m_last_op_sequence_num = 0;

  bool m_persist_on_write_until_flush = true;

  pwl::WriteLogGuard m_flush_guard;
  mutable ceph::mutex m_flush_guard_lock;

  /* Debug counters for the places m_async_op_tracker is used */
  std::atomic<int> m_async_complete_ops = {0};
  std::atomic<int> m_async_null_flush_finish = {0};
  std::atomic<int> m_async_process_work = {0};

  /* Hold m_deferred_dispatch_lock while consuming from m_deferred_ios. */
  mutable ceph::mutex m_deferred_dispatch_lock;

  /* Used in release/detain to make BlockGuard preserve submission order */
  mutable ceph::mutex m_blockguard_lock;

  /* Use m_blockguard_lock for the following 3 things */
  bool m_barrier_in_progress = false;
  BlockGuardCell *m_barrier_cell = nullptr;

  bool m_wake_up_enabled = true;

  Contexts m_flush_complete_contexts;

  std::shared_ptr<pwl::SyncPoint> m_current_sync_point = nullptr;
  bool m_persist_on_flush = false; // If false, persist each write before completion

  int m_flush_ops_in_flight = 0;
  int m_flush_bytes_in_flight = 0;
  uint64_t m_lowest_flushing_sync_gen = 0;

  /* Writes that have left the block guard, but are waiting for resources */
  C_BlockIORequests m_deferred_ios;
  /* Throttle writes concurrently allocating & replicating */
  unsigned int m_free_lanes = pwl::MAX_CONCURRENT_WRITES;

  SafeTimer *m_timer = nullptr;                /* Used with m_timer_lock */
  mutable ceph::mutex *m_timer_lock = nullptr; /* Used with and by m_timer */
  Context *m_timer_ctx = nullptr;

  ThreadPool m_thread_pool;

  uint32_t m_discard_granularity_bytes;

  BlockGuardCell* detain_guarded_request_helper(pwl::GuardedRequest &req);
  BlockGuardCell* detain_guarded_request_barrier_helper(
      pwl::GuardedRequest &req);
  void detain_guarded_request(C_BlockIORequestT *request,
                              pwl::GuardedRequestFunctionContext *guarded_ctx,
                              bool is_barrier);

  void perf_start(const std::string name);
  void perf_stop();
  void log_perf();
  void periodic_stats();
  void arm_periodic_stats();

  void pwl_init(Context *on_finish, pwl::DeferredContexts &later);
  void check_image_cache_state_clean();

  void flush_dirty_entries(Context *on_finish);
  bool can_flush_entry(const std::shared_ptr<pwl::GenericLogEntry> log_entry);
  bool handle_flushed_sync_point(
      std::shared_ptr<pwl::SyncPointLogEntry> log_entry);
  void sync_point_writer_flushed(
      std::shared_ptr<pwl::SyncPointLogEntry> log_entry);

  void init_flush_new_sync_point(pwl::DeferredContexts &later);
  void new_sync_point(pwl::DeferredContexts &later);
  pwl::C_FlushRequest<AbstractWriteLog<ImageCtxT>>* make_flush_req(
      Context *on_finish);
  void flush_new_sync_point_if_needed(C_FlushRequestT *flush_req,
                                      pwl::DeferredContexts &later);

  void alloc_and_dispatch_io_req(C_BlockIORequestT *write_req);
  void schedule_complete_op_log_entries(pwl::GenericLogOperations &&ops,
                                        const int r);
  void internal_flush(bool invalidate, Context *on_finish);

protected:
  librbd::cache::pwl::ImageCacheState<ImageCtxT>* m_cache_state = nullptr;

  std::atomic<bool> m_shutting_down = {false};
  std::atomic<bool> m_invalidating = {false};

  ImageCtxT &m_image_ctx;

  std::string m_log_pool_name;
  uint64_t m_log_pool_size;

  uint32_t m_total_log_entries = 0;
  uint32_t m_free_log_entries = 0;

  std::atomic<uint64_t> m_bytes_allocated = {0}; /* Total bytes allocated in write buffers */
  uint64_t m_bytes_cached = 0;  /* Total bytes used in write buffers */
  uint64_t m_bytes_allocated_cap = 0;

  std::atomic<bool> m_alloc_failed_since_retire = {false};

  cache::ImageWritebackInterface& m_image_writeback;
  plugin::Api<ImageCtxT>& m_plugin_api;

  /*
   * When m_first_free_entry == m_first_valid_entry, the log is
   * empty. There is always at least one free entry, which can't be
   * used.
   */
  uint64_t m_first_free_entry = 0;  /* Entries from here to m_first_valid_entry-1 are free */
  uint64_t m_first_valid_entry = 0; /* Entries from here to m_first_free_entry-1 are valid */

  /* All writes bearing this and all prior sync gen numbers are flushed */
  uint64_t m_flushed_sync_gen = 0;

  AsyncOpTracker m_async_op_tracker;
  /* Debug counters for the places m_async_op_tracker is used */
  std::atomic<int> m_async_flush_ops = {0};
  std::atomic<int> m_async_append_ops = {0};

  /* Acquire locks in order declared here */

  mutable ceph::mutex m_log_retire_lock;
  /* Hold a read lock on m_entry_reader_lock to add readers to log entry
   * bufs. Hold a write lock to prevent readers from being added (e.g. when
   * removing log entries from the map). No lock required to remove readers. */
  mutable RWLock m_entry_reader_lock;
  /* Hold m_log_append_lock while appending or retiring log entries. */
  mutable ceph::mutex m_log_append_lock;
  /* Used for most synchronization */
  mutable ceph::mutex m_lock;

  /* Use m_blockguard_lock for the following 3 things */
  pwl::WriteLogGuard::BlockOperations m_awaiting_barrier;

  bool m_wake_up_requested = false;
  bool m_wake_up_scheduled = false;
  bool m_appending = false;
  bool m_dispatching_deferred_ops = false;

  pwl::GenericLogOperations m_ops_to_flush;  /* Write ops needing flush in local log */
  pwl::GenericLogOperations m_ops_to_append; /* Write ops needing event append in local log */

  pwl::WriteLogMap m_blocks_to_log_entries;

  /* New entries are at the back. Oldest at the front */
  pwl::GenericLogEntries m_log_entries;
  pwl::GenericLogEntries m_dirty_log_entries;

  PerfCounters *m_perfcounter = nullptr;

  unsigned int m_unpublished_reserves = 0;

  ContextWQ m_work_queue;

  void wake_up();

  void update_entries(
      std::shared_ptr<pwl::GenericLogEntry> *log_entry,
      pwl::WriteLogCacheEntry *cache_entry,
      std::map<uint64_t, bool> &missing_sync_points,
      std::map<uint64_t, std::shared_ptr<pwl::SyncPointLogEntry>> &sync_point_entries,
      uint64_t entry_index);
  void update_sync_points(
      std::map<uint64_t, bool> &missing_sync_points,
      std::map<uint64_t, std::shared_ptr<pwl::SyncPointLogEntry>> &sync_point_entries,
      pwl::DeferredContexts &later);
  virtual void inc_allocated_cached_bytes(
      std::shared_ptr<pwl::GenericLogEntry> log_entry) = 0;
  Context *construct_flush_entry(
      const std::shared_ptr<pwl::GenericLogEntry> log_entry,
      bool invalidating);
  void detain_flush_guard_request(std::shared_ptr<GenericLogEntry> log_entry,
                                  GuardedRequestFunctionContext *guarded_ctx);
  void process_writeback_dirty_entries();
  bool can_retire_entry(const std::shared_ptr<pwl::GenericLogEntry> log_entry);

  void dispatch_deferred_writes(void);
  void complete_op_log_entries(pwl::GenericLogOperations &&ops, const int r);

  bool check_allocation(
      C_BlockIORequestT *req, uint64_t bytes_cached, uint64_t bytes_dirtied,
      uint64_t bytes_allocated, uint32_t num_lanes, uint32_t num_log_entries,
      uint32_t num_unpublished_reserves);
  void append_scheduled(
      pwl::GenericLogOperations &ops, bool &ops_remain, bool &appending,
      bool isRWL=false);

  virtual void process_work() = 0;
  virtual void append_scheduled_ops(void) = 0;
  virtual void schedule_append_ops(pwl::GenericLogOperations &ops,
                                   C_BlockIORequestT *req) = 0;
  virtual void remove_pool_file() = 0;
  virtual bool initialize_pool(Context *on_finish,
                               pwl::DeferredContexts &later) = 0;
  virtual void collect_read_extents(
      uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, uint64_t entry_hit_length,
      Extent hit_extent, pwl::C_ReadRequest *read_ctx) = 0;
  virtual void complete_read(
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, Context *ctx) = 0;
  virtual void write_data_to_buffer(
      std::shared_ptr<pwl::WriteLogEntry> ws_entry,
      pwl::WriteLogCacheEntry *cache_entry) {}
  virtual void release_ram(
      const std::shared_ptr<pwl::GenericLogEntry> log_entry) {}
  virtual void alloc_op_log_entries(pwl::GenericLogOperations &ops) {}
  virtual bool retire_entries(const unsigned long int frees_per_tx) {
    return false;
  }
  virtual void schedule_flush_and_append(
      pwl::GenericLogOperationsVector &ops) {}
  virtual void persist_last_flushed_sync_gen() {}
  virtual void reserve_cache(C_BlockIORequestT *req, bool &alloc_succeeds,
                             bool &no_space) {}
  virtual void construct_flush_entries(pwl::GenericLogEntries entries_to_flush,
                                       DeferredContexts &post_unlock,
                                       bool has_write_entry) = 0;
  virtual uint64_t get_max_extent() {
    return 0;
  }
  void update_image_cache_state(void);
  void write_image_cache_state(std::unique_lock<ceph::mutex>& locker);
  void handle_write_image_cache_state(int r);
};

} // namespace pwl
} // namespace cache
} // namespace librbd

extern template class librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_PARENT_WRITE_LOG
14,906
35.270073
95
h
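The comment at the tail of this header pins down the ring-buffer invariant for m_first_free_entry/m_first_valid_entry: the log is empty when the two indices are equal, and one slot is always kept unusable so "full" and "empty" stay distinguishable. A minimal standalone sketch of that arithmetic (illustrative names, not the AbstractWriteLog implementation):

#include <cstdint>
#include <cassert>

struct RingIndex {
  uint64_t first_free = 0;   /* next entry to write */
  uint64_t first_valid = 0;  /* oldest entry to retire */
  uint64_t total;            /* total slots in the ring */

  explicit RingIndex(uint64_t total) : total(total) { assert(total > 1); }

  bool empty() const { return first_free == first_valid; }
  uint64_t in_use() const {
    return (first_free + total - first_valid) % total;
  }
  /* One slot must stay free, so usable capacity is total - 1. */
  bool full() const { return in_use() == total - 1; }

  void push()   { assert(!full());  first_free  = (first_free  + 1) % total; }
  void retire() { assert(!empty()); first_valid = (first_valid + 1) % total; }
};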
null
ceph-main/src/librbd/cache/pwl/Builder.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_BUILDER_H
#define CEPH_LIBRBD_CACHE_PWL_BUILDER_H

namespace librbd {
namespace cache {
namespace pwl {

template <typename T>
class Builder {
public:
  virtual ~Builder() {}
  virtual std::shared_ptr<WriteLogEntry> create_write_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes) = 0;
  virtual std::shared_ptr<WriteLogEntry> create_write_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes) = 0;
  virtual std::shared_ptr<WriteLogEntry> create_writesame_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) = 0;
  virtual std::shared_ptr<WriteLogEntry> create_writesame_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) = 0;
  virtual C_WriteRequest<T> *create_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) = 0;
  virtual C_WriteSameRequest<T> *create_writesame_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) = 0;
  virtual C_WriteRequest<T> *create_comp_and_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) = 0;
  virtual std::shared_ptr<WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, CephContext *cct,
      std::shared_ptr<WriteLogEntry> write_log_entry) = 0;
  virtual std::shared_ptr<WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t data_len, CephContext *cct,
      std::shared_ptr<WriteLogEntry> writesame_log_entry) = 0;
  virtual std::shared_ptr<pwl::DiscardLogOperation> create_discard_log_operation(
      std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t discard_granularity_bytes,
      utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct) = 0;
  virtual C_ReadRequest *create_read_request(
      CephContext *cct, utime_t arrived, PerfCounters *perfcounter,
      ceph::bufferlist *bl, Context *on_finish) = 0;
};

} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_BUILDER_H
2,842
44.854839
81
h
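Builder<T> is a pure factory interface: each backend supplies a concrete builder, so AbstractWriteLog can construct entries, requests, and operations without knowing which cache flavor it is driving. A self-contained toy of the same pattern (hypothetical names, not the librbd types):

#include <memory>
#include <iostream>

struct Entry { virtual ~Entry() = default; virtual const char *kind() const = 0; };
struct RwlEntry : Entry { const char *kind() const override { return "rwl"; } };
struct SsdEntry : Entry { const char *kind() const override { return "ssd"; } };

struct EntryBuilder {
  virtual ~EntryBuilder() = default;
  virtual std::unique_ptr<Entry> create_entry() = 0;  // backend decides the type
};
struct RwlBuilder : EntryBuilder {
  std::unique_ptr<Entry> create_entry() override { return std::make_unique<RwlEntry>(); }
};
struct SsdBuilder : EntryBuilder {
  std::unique_ptr<Entry> create_entry() override { return std::make_unique<SsdEntry>(); }
};

int main() {
  std::unique_ptr<EntryBuilder> b = std::make_unique<SsdBuilder>();
  std::cout << b->create_entry()->kind() << "\n";  // prints "ssd"
}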
null
ceph-main/src/librbd/cache/pwl/ImageCacheState.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_RWL_IMAGE_CACHE_STATE_H
#define CEPH_LIBRBD_CACHE_RWL_IMAGE_CACHE_STATE_H

#include "json_spirit/json_spirit.h"
#include "librbd/ImageCtx.h"
#include "librbd/cache/Types.h"
#include <string>

namespace ceph {
class Formatter;
}

namespace librbd {

namespace plugin { template <typename> struct Api; }

namespace cache {
namespace pwl {

template <typename ImageCtxT = ImageCtx>
class ImageCacheState {
private:
  ImageCtxT* m_image_ctx;
  plugin::Api<ImageCtxT>& m_plugin_api;
public:
  bool present = false;
  bool empty = true;
  bool clean = true;
  std::string host;
  std::string path;
  std::string mode;
  uint64_t size = 0;
  /* After reloading, the following data does not need to be read,
   * but recalculated. */
  utime_t stats_timestamp;
  uint64_t allocated_bytes = 0;
  uint64_t cached_bytes = 0;
  uint64_t dirty_bytes = 0;
  uint64_t free_bytes = 0;
  uint64_t hits_full = 0;
  uint64_t hits_partial = 0;
  uint64_t misses = 0;
  uint64_t hit_bytes = 0;
  uint64_t miss_bytes = 0;

  ImageCacheState(ImageCtxT* image_ctx, plugin::Api<ImageCtxT>& plugin_api)
    : m_image_ctx(image_ctx), m_plugin_api(plugin_api) {}

  ~ImageCacheState() {}

  ImageCacheType get_image_cache_mode() const {
    if (mode == "rwl") {
      return IMAGE_CACHE_TYPE_RWL;
    } else if (mode == "ssd") {
      return IMAGE_CACHE_TYPE_SSD;
    }
    return IMAGE_CACHE_TYPE_UNKNOWN;
  }

  void init_from_config();
  bool init_from_metadata(json_spirit::mValue& json_root);

  void write_image_cache_state(std::unique_lock<ceph::mutex>& locker,
                               Context *on_finish);

  void clear_image_cache_state(Context *on_finish);

  static ImageCacheState<ImageCtxT>* create_image_cache_state(
    ImageCtxT* image_ctx, plugin::Api<ImageCtxT>& plugin_api, int &r);

  static ImageCacheState<ImageCtxT>* get_image_cache_state(
    ImageCtxT* image_ctx, plugin::Api<ImageCtxT>& plugin_api);

  bool is_valid();
};

} // namespace pwl
} // namespace cache
} // namespace librbd

extern template class librbd::cache::pwl::ImageCacheState<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_RWL_IMAGE_CACHE_STATE_H
2,251
24.885057
76
h
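get_image_cache_mode() above is a plain string-to-enum mapping with an Unknown fallback. A standalone toy mirroring that logic (toy names, not the librbd enum values):

#include <string>
#include <cassert>

enum class CacheType { RWL, SSD, Unknown };

// Same shape as get_image_cache_mode(): exact match or Unknown.
CacheType mode_from_string(const std::string &mode) {
  if (mode == "rwl") return CacheType::RWL;
  if (mode == "ssd") return CacheType::SSD;
  return CacheType::Unknown;
}

int main() {
  assert(mode_from_string("rwl") == CacheType::RWL);
  assert(mode_from_string("ssd") == CacheType::SSD);
  assert(mode_from_string("tmpfs") == CacheType::Unknown);
}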
null
ceph-main/src/librbd/cache/pwl/LogEntry.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_CACHE_PWL_LOG_ENTRY_H #define CEPH_LIBRBD_CACHE_PWL_LOG_ENTRY_H #include "common/ceph_mutex.h" #include "librbd/Utils.h" #include "librbd/cache/pwl/Types.h" #include <atomic> #include <memory> namespace librbd { namespace cache { class ImageWritebackInterface; namespace pwl { class SyncPointLogEntry; class GenericWriteLogEntry; class WriteLogEntry; typedef std::list<std::shared_ptr<GenericWriteLogEntry>> GenericWriteLogEntries; class GenericLogEntry { public: WriteLogCacheEntry ram_entry; WriteLogCacheEntry *cache_entry = nullptr; uint64_t log_entry_index = 0; bool completed = false; BlockGuardCell* m_cell = nullptr; GenericLogEntry(uint64_t image_offset_bytes = 0, uint64_t write_bytes = 0) : ram_entry(image_offset_bytes, write_bytes) { }; virtual ~GenericLogEntry() { }; GenericLogEntry(const GenericLogEntry&) = delete; GenericLogEntry &operator=(const GenericLogEntry&) = delete; virtual bool can_writeback() const { return false; } virtual bool can_retire() const { return false; } virtual void set_flushed(bool flushed) { ceph_assert(false); } virtual unsigned int write_bytes() const { return 0; }; virtual unsigned int bytes_dirty() const { return 0; }; virtual std::shared_ptr<SyncPointLogEntry> get_sync_point_entry() { return nullptr; } virtual void writeback(librbd::cache::ImageWritebackInterface &image_writeback, Context *ctx) { ceph_assert(false); }; virtual void writeback_bl(librbd::cache::ImageWritebackInterface &image_writeback, Context *ctx, ceph::bufferlist &&bl) { ceph_assert(false); } virtual bool is_write_entry() const { return false; } virtual bool is_writesame_entry() const { return false; } virtual bool is_sync_point() const { return false; } virtual unsigned int get_aligned_data_size() const { return 0; } virtual void remove_cache_bl() {} virtual std::ostream& format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const GenericLogEntry &entry); }; class SyncPointLogEntry : public GenericLogEntry { public: /* Writing entries using this sync gen number */ std::atomic<unsigned int> writes = {0}; /* Total bytes for all writing entries using this sync gen number */ std::atomic<uint64_t> bytes = {0}; /* Writing entries using this sync gen number that have completed */ std::atomic<unsigned int> writes_completed = {0}; /* Writing entries using this sync gen number that have completed flushing to the writeback interface */ std::atomic<unsigned int> writes_flushed = {0}; /* All writing entries using all prior sync gen numbers have been flushed */ std::atomic<bool> prior_sync_point_flushed = {true}; std::shared_ptr<SyncPointLogEntry> next_sync_point_entry = nullptr; SyncPointLogEntry(uint64_t sync_gen_number) { ram_entry.sync_gen_number = sync_gen_number; ram_entry.set_sync_point(true); }; ~SyncPointLogEntry() override {}; SyncPointLogEntry(const SyncPointLogEntry&) = delete; SyncPointLogEntry &operator=(const SyncPointLogEntry&) = delete; bool can_retire() const override { return this->completed; } bool is_sync_point() const override { return true; } std::ostream& format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const SyncPointLogEntry &entry); }; class GenericWriteLogEntry : public GenericLogEntry { public: uint32_t referring_map_entries = 0; std::shared_ptr<SyncPointLogEntry> sync_point_entry; GenericWriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry, uint64_t image_offset_bytes, uint64_t 
write_bytes) : GenericLogEntry(image_offset_bytes, write_bytes), sync_point_entry(sync_point_entry) { } GenericWriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes) : GenericLogEntry(image_offset_bytes, write_bytes), sync_point_entry(nullptr) { } ~GenericWriteLogEntry() override {}; GenericWriteLogEntry(const GenericWriteLogEntry&) = delete; GenericWriteLogEntry &operator=(const GenericWriteLogEntry&) = delete; unsigned int write_bytes() const override { /* The valid bytes in this ops data buffer. Discard and WS override. */ return ram_entry.write_bytes; }; unsigned int bytes_dirty() const override { /* The bytes in the image this op makes dirty. Discard and WS override. */ return write_bytes(); }; BlockExtent block_extent() { return ram_entry.block_extent(); } uint32_t get_map_ref() { return(referring_map_entries); } void inc_map_ref() { referring_map_entries++; } void dec_map_ref() { referring_map_entries--; } bool can_writeback() const override; std::shared_ptr<SyncPointLogEntry> get_sync_point_entry() override { return sync_point_entry; } virtual void copy_cache_bl(bufferlist *out_bl) = 0; void set_flushed(bool flushed) override { m_flushed = flushed; } bool get_flushed() const { return m_flushed; } std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const GenericWriteLogEntry &entry); private: bool m_flushed = false; /* or invalidated */ }; class WriteLogEntry : public GenericWriteLogEntry { protected: bool is_writesame = false; buffer::ptr cache_bp; buffer::list cache_bl; std::atomic<int> bl_refs = {0}; /* The refs held on cache_bp by cache_bl */ /* Used in WriteLogEntry::get_cache_bl() to synchronize between threads making entries readable */ mutable ceph::mutex m_entry_bl_lock; virtual void init_cache_bp() {} virtual void init_bl(buffer::ptr &bp, buffer::list &bl) {} public: uint8_t *cache_buffer = nullptr; WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry, uint64_t image_offset_bytes, uint64_t write_bytes) : GenericWriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes), m_entry_bl_lock(ceph::make_mutex(pwl::unique_lock_name( "librbd::cache::pwl::WriteLogEntry::m_entry_bl_lock", this))) { } WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes) : GenericWriteLogEntry(nullptr, image_offset_bytes, write_bytes), m_entry_bl_lock(ceph::make_mutex(pwl::unique_lock_name( "librbd::cache::pwl::WriteLogEntry::m_entry_bl_lock", this))) { } WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry, uint64_t image_offset_bytes, uint64_t write_bytes, uint32_t data_length) : WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes) { ram_entry.set_writesame(true); ram_entry.ws_datalen = data_length; is_writesame = true; }; WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes, uint32_t data_length) : WriteLogEntry(nullptr, image_offset_bytes, write_bytes) { ram_entry.set_writesame(true); ram_entry.ws_datalen = data_length; is_writesame = true; }; ~WriteLogEntry() override {}; WriteLogEntry(const WriteLogEntry&) = delete; WriteLogEntry &operator=(const WriteLogEntry&) = delete; unsigned int write_bytes() const override { // The valid bytes in this ops data buffer. if(is_writesame) { return ram_entry.ws_datalen; } return ram_entry.write_bytes; }; unsigned int bytes_dirty() const override { // The bytes in the image this op makes dirty. 
return ram_entry.write_bytes; }; void init(bool has_data, uint64_t current_sync_gen, uint64_t last_op_sequence_num, bool persist_on_flush); virtual void init_cache_buffer(std::vector<WriteBufferAllocation>::iterator allocation) {} virtual void init_cache_bl(bufferlist &src_bl, uint64_t off, uint64_t len) {} /* Returns a ref to a bl containing bufferptrs to the entry cache buffer */ virtual buffer::list &get_cache_bl() = 0; BlockExtent block_extent(); virtual unsigned int reader_count() const = 0; /* Constructs a new bl containing copies of cache_bp */ bool can_retire() const override { return (this->completed && this->get_flushed() && (0 == reader_count())); } bool is_write_entry() const override { return true; } bool is_writesame_entry() const override { return is_writesame; } std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const WriteLogEntry &entry); }; class DiscardLogEntry : public GenericWriteLogEntry { public: DiscardLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry, uint64_t image_offset_bytes, uint64_t write_bytes, uint32_t discard_granularity_bytes) : GenericWriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes), m_discard_granularity_bytes(discard_granularity_bytes) { ram_entry.set_discard(true); }; DiscardLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes) : GenericWriteLogEntry(nullptr, image_offset_bytes, write_bytes) { ram_entry.set_discard(true); }; DiscardLogEntry(const DiscardLogEntry&) = delete; DiscardLogEntry &operator=(const DiscardLogEntry&) = delete; unsigned int write_bytes() const override { /* The valid bytes in this ops data buffer. */ return 0; }; unsigned int bytes_dirty() const override { /* The bytes in the image this op makes dirty. */ return ram_entry.write_bytes; }; bool can_retire() const override { return this->completed; } void copy_cache_bl(bufferlist *out_bl) override { ceph_assert(false); } void writeback(librbd::cache::ImageWritebackInterface &image_writeback, Context *ctx) override; void init(uint64_t current_sync_gen, bool persist_on_flush, uint64_t last_op_sequence_num); std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const DiscardLogEntry &entry); private: uint32_t m_discard_granularity_bytes; }; } // namespace pwl } // namespace cache } // namespace librbd #endif // CEPH_LIBRBD_CACHE_PWL_LOG_ENTRY_H
10,315
35.711744
106
h
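WriteLogEntry::can_retire() above combines three conditions: the entry has completed, it has been flushed back to the image, and no reader still holds a reference on its cache buffer. An illustrative-only mirror of that retirement rule:

#include <atomic>
#include <cassert>

struct ToyWriteEntry {
  bool completed = false;       // appended to the log and acknowledged
  bool flushed = false;         // written back to the image
  std::atomic<int> readers{0};  // readers pinning the cache buffer

  bool can_retire() const {
    return completed && flushed && readers.load() == 0;
  }
};

int main() {
  ToyWriteEntry e;
  e.completed = true;
  e.flushed = true;
  e.readers = 1;
  assert(!e.can_retire());  // a reader still pins the buffer
  e.readers = 0;
  assert(e.can_retire());   // safe to reclaim the log slot
}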
null
ceph-main/src/librbd/cache/pwl/LogMap.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_RWL_LOG_MAP_H
#define CEPH_LIBRBD_CACHE_RWL_LOG_MAP_H

#include "librbd/BlockGuard.h"
#include <list>

namespace librbd {
namespace cache {
namespace pwl {

/**
 * WriteLogMap: maps block extents to GenericWriteLogEntries
 *
 * A WriteLogMapEntry (based on LogMapEntry) refers to a portion of a
 * GenericWriteLogEntry
 */
template <typename T>
class LogMapEntry {
public:
  BlockExtent block_extent;
  std::shared_ptr<T> log_entry;

  LogMapEntry(BlockExtent block_extent,
              std::shared_ptr<T> log_entry = nullptr);
  LogMapEntry(std::shared_ptr<T> log_entry);

  template <typename U>
  friend std::ostream &operator<<(std::ostream &os,
                                  LogMapEntry<U> &e);
};

template <typename T>
using LogMapEntries = std::list<LogMapEntry<T>>;

template <typename T>
class LogMap {
public:
  LogMap(CephContext *cct);
  LogMap(const LogMap&) = delete;
  LogMap &operator=(const LogMap&) = delete;

  void add_log_entry(std::shared_ptr<T> log_entry);
  void add_log_entries(std::list<std::shared_ptr<T>> &log_entries);
  void remove_log_entry(std::shared_ptr<T> log_entry);
  void remove_log_entries(std::list<std::shared_ptr<T>> &log_entries);
  std::list<std::shared_ptr<T>> find_log_entries(BlockExtent block_extent);
  LogMapEntries<T> find_map_entries(BlockExtent block_extent);

private:
  void add_log_entry_locked(std::shared_ptr<T> log_entry);
  void remove_log_entry_locked(std::shared_ptr<T> log_entry);
  void add_map_entry_locked(LogMapEntry<T> &map_entry);
  void remove_map_entry_locked(LogMapEntry<T> &map_entry);
  void adjust_map_entry_locked(LogMapEntry<T> &map_entry,
                               BlockExtent &new_extent);
  void split_map_entry_locked(LogMapEntry<T> &map_entry,
                              BlockExtent &removed_extent);
  std::list<std::shared_ptr<T>> find_log_entries_locked(
      const BlockExtent &block_extent);
  LogMapEntries<T> find_map_entries_locked(const BlockExtent &block_extent);

  using LogMapEntryT = LogMapEntry<T>;

  class LogMapEntryCompare {
  public:
    bool operator()(const LogMapEntryT &lhs,
                    const LogMapEntryT &rhs) const;
  };

  using BlockExtentToLogMapEntries =
      std::set<LogMapEntryT, LogMapEntryCompare>;

  CephContext *m_cct;
  ceph::mutex m_lock;
  BlockExtentToLogMapEntries m_block_to_log_entry_map;
};

} //namespace pwl
} //namespace cache
} //namespace librbd

#endif //CEPH_LIBRBD_CACHE_RWL_LOG_MAP_H
2,540
29.987805
90
h
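The adjust_map_entry_locked()/split_map_entry_locked() pair implies interval arithmetic: when a newer write overlaps an older map entry, the older entry keeps only the portions the new write does not cover (zero, one, or two surviving fragments). A self-contained sketch of that subtraction over half-open extents (illustrative, not the LogMap code):

#include <list>
#include <cstdint>
#include <cassert>

struct Extent { uint64_t start, end; };  // half-open [start, end)

// Return the parts of old_e that survive after new_e overwrites it.
std::list<Extent> subtract(const Extent &old_e, const Extent &new_e) {
  std::list<Extent> out;
  if (new_e.end <= old_e.start || new_e.start >= old_e.end) {
    out.push_back(old_e);                         // no overlap: intact
    return out;
  }
  if (old_e.start < new_e.start)
    out.push_back({old_e.start, new_e.start});    // left remnant (adjust)
  if (new_e.end < old_e.end)
    out.push_back({new_e.end, old_e.end});        // right remnant (split)
  return out;
}

int main() {
  auto parts = subtract({0, 100}, {40, 60});      // write punches a hole
  assert(parts.size() == 2);
  assert(parts.front().end == 40 && parts.back().start == 60);
}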
null
ceph-main/src/librbd/cache/pwl/LogOperation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_CACHE_RWL_LOG_OPERATION_H #define CEPH_LIBRBD_CACHE_RWL_LOG_OPERATION_H #include "include/utime.h" #include "librbd/cache/pwl/LogEntry.h" #include "librbd/cache/pwl/SyncPoint.h" namespace librbd { namespace cache { namespace pwl { struct WriteBufferAllocation; class WriteLogOperationSet; class WriteLogOperation; class GenericWriteLogOperation; class SyncPointLogOperation; class GenericLogOperation; template <typename T> class AbstractWriteLog; using GenericLogOperationSharedPtr = std::shared_ptr<GenericLogOperation>; using GenericLogOperationsVector = std::vector<GenericLogOperationSharedPtr>; class GenericLogOperation { protected: PerfCounters *m_perfcounter = nullptr; public: utime_t dispatch_time; // When op created utime_t buf_persist_start_time; // When buffer persist begins utime_t buf_persist_comp_time; // When buffer persist completes utime_t log_append_start_time; // When log append begins utime_t log_append_comp_time; // When log append completes GenericLogOperation(utime_t dispatch_time, PerfCounters *perfcounter); virtual ~GenericLogOperation() { }; GenericLogOperation(const GenericLogOperation&) = delete; GenericLogOperation &operator=(const GenericLogOperation&) = delete; virtual std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const GenericLogOperation &op); virtual const std::shared_ptr<GenericLogEntry> get_log_entry() = 0; virtual void appending() = 0; virtual void complete(int r) = 0; virtual void mark_log_entry_completed() {}; virtual bool reserved_allocated() const { return false; } virtual bool is_writing_op() const { return false; } virtual void init_op(uint64_t current_sync_gen, bool persist_on_flush, uint64_t last_op_sequence_num, Context *write_persist, Context *write_append) {}; virtual void copy_bl_to_cache_buffer( std::vector<WriteBufferAllocation>::iterator allocation) {}; }; class SyncPointLogOperation : public GenericLogOperation { private: CephContext *m_cct; ceph::mutex &m_lock; std::vector<Context*> append_sync_point(); void clear_earlier_sync_point(); std::vector<Context*> swap_on_sync_point_persisted(); public: std::shared_ptr<SyncPoint> sync_point; SyncPointLogOperation(ceph::mutex &lock, std::shared_ptr<SyncPoint> sync_point, utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct); ~SyncPointLogOperation() override; SyncPointLogOperation(const SyncPointLogOperation&) = delete; SyncPointLogOperation &operator=(const SyncPointLogOperation&) = delete; std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const SyncPointLogOperation &op); const std::shared_ptr<GenericLogEntry> get_log_entry() override { return sync_point->log_entry; } void appending() override; void complete(int r) override; }; class GenericWriteLogOperation : public GenericLogOperation { protected: ceph::mutex m_lock; CephContext *m_cct; public: std::shared_ptr<SyncPoint> sync_point; Context *on_write_append = nullptr; /* Completion for things waiting on this * write's position in the log to be * guaranteed */ Context *on_write_persist = nullptr; /* Completion for things waiting on this * write to persist */ GenericWriteLogOperation(std::shared_ptr<SyncPoint> sync_point, utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct); ~GenericWriteLogOperation() override; GenericWriteLogOperation(const GenericWriteLogOperation&) = delete; GenericWriteLogOperation 
&operator=(const GenericWriteLogOperation&) = delete; std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const GenericWriteLogOperation &op); void mark_log_entry_completed() override{ sync_point->log_entry->writes_completed++; } bool reserved_allocated() const override { return true; } bool is_writing_op() const override { return true; } void appending() override; void complete(int r) override; }; class WriteLogOperation : public GenericWriteLogOperation { public: using GenericWriteLogOperation::m_lock; using GenericWriteLogOperation::sync_point; using GenericWriteLogOperation::on_write_append; using GenericWriteLogOperation::on_write_persist; std::shared_ptr<WriteLogEntry> log_entry; bufferlist bl; bool is_writesame = false; WriteBufferAllocation *buffer_alloc = nullptr; WriteLogOperation(WriteLogOperationSet &set, uint64_t image_offset_bytes, uint64_t write_bytes, CephContext *cct, std::shared_ptr<WriteLogEntry> write_log_entry); WriteLogOperation(WriteLogOperationSet &set, uint64_t image_offset_bytes, uint64_t write_bytes, uint32_t data_len, CephContext *cct, std::shared_ptr<WriteLogEntry> writesame_log_entry); ~WriteLogOperation() override; WriteLogOperation(const WriteLogOperation&) = delete; WriteLogOperation &operator=(const WriteLogOperation&) = delete; void init(bool has_data, std::vector<WriteBufferAllocation>::iterator allocation, uint64_t current_sync_gen, uint64_t last_op_sequence_num, bufferlist &write_req_bl, uint64_t buffer_offset, bool persist_on_flush); std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const WriteLogOperation &op); const std::shared_ptr<GenericLogEntry> get_log_entry() override { return log_entry; } void complete(int r) override; }; class WriteLogOperationSet { private: CephContext *m_cct; Context *m_on_finish; public: bool persist_on_flush; BlockGuardCell *cell; C_Gather *extent_ops_appending; Context *on_ops_appending; C_Gather *extent_ops_persist; Context *on_ops_persist; GenericLogOperationsVector operations; utime_t dispatch_time; /* When set created */ PerfCounters *perfcounter = nullptr; std::shared_ptr<SyncPoint> sync_point; WriteLogOperationSet(utime_t dispatched, PerfCounters *perfcounter, std::shared_ptr<SyncPoint> sync_point, const bool persist_on_flush, CephContext *cct, Context *on_finish); ~WriteLogOperationSet(); WriteLogOperationSet(const WriteLogOperationSet&) = delete; WriteLogOperationSet &operator=(const WriteLogOperationSet&) = delete; friend std::ostream &operator<<(std::ostream &os, const WriteLogOperationSet &s); }; class DiscardLogOperation : public GenericWriteLogOperation { public: using GenericWriteLogOperation::m_lock; using GenericWriteLogOperation::sync_point; using GenericWriteLogOperation::on_write_append; using GenericWriteLogOperation::on_write_persist; std::shared_ptr<DiscardLogEntry> log_entry; DiscardLogOperation(std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes, uint64_t write_bytes, uint32_t discard_granularity_bytes, utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct); ~DiscardLogOperation() override; DiscardLogOperation(const DiscardLogOperation&) = delete; DiscardLogOperation &operator=(const DiscardLogOperation&) = delete; const std::shared_ptr<GenericLogEntry> get_log_entry() override { return log_entry; } bool reserved_allocated() const override { return false; } std::ostream &format(std::ostream &os) const; friend std::ostream &operator<<(std::ostream &os, const DiscardLogOperation 
&op); }; } // namespace pwl } // namespace cache } // namespace librbd #endif // CEPH_LIBRBD_CACHE_RWL_LOG_OPERATION_H
8,440
36.515556
80
h
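GenericLogOperation carries five timestamps that bracket an op's phases (dispatch, buffer persist start/complete, log append start/complete); the per-phase perf counters are just differences between them. A toy of the arithmetic, with doubles standing in for utime_t:

#include <cstdio>

struct OpTimes {
  double dispatch, buf_start, buf_comp, app_start, app_comp;  // seconds
};

void report(const OpTimes &t) {
  std::printf("dispatch -> buf persist  %.6f s\n", t.buf_start - t.dispatch);
  std::printf("buf persist elapsed      %.6f s\n", t.buf_comp - t.buf_start);
  std::printf("buf -> append queued     %.6f s\n", t.app_start - t.buf_comp);
  std::printf("log append elapsed       %.6f s\n", t.app_comp - t.app_start);
}

int main() {
  report({0.0, 0.0001, 0.0009, 0.0012, 0.0020});
}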
null
ceph-main/src/librbd/cache/pwl/ReadRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_RWL_READ_REQUEST_H
#define CEPH_LIBRBD_CACHE_RWL_READ_REQUEST_H

#include "include/Context.h"
#include "librbd/cache/pwl/Types.h"

namespace librbd {
namespace cache {
namespace pwl {

typedef std::vector<std::shared_ptr<pwl::ImageExtentBuf>> ImageExtentBufs;

class C_ReadRequest : public Context {
public:
  io::Extents miss_extents; // move back to caller
  ImageExtentBufs read_extents;
  bufferlist miss_bl;

  C_ReadRequest(
      CephContext *cct, utime_t arrived, PerfCounters *perfcounter,
      bufferlist *out_bl, Context *on_finish)
    : m_cct(cct), m_on_finish(on_finish), m_out_bl(out_bl),
      m_arrived_time(arrived), m_perfcounter(perfcounter) {}
  ~C_ReadRequest() {}

  const char *get_name() const {
    return "C_ReadRequest";
  }

protected:
  CephContext *m_cct;
  Context *m_on_finish;
  bufferlist *m_out_bl;
  utime_t m_arrived_time;
  PerfCounters *m_perfcounter;
};

} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_RWL_READ_REQUEST_H
1,132
23.630435
74
h
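The miss_extents/read_extents/miss_bl split suggests the completion path stitches the reply together: each read extent contributes either cached bytes (hit) or bytes fetched from the image (miss), concatenated in offset order. A toy of that assembly step (strings stand in for bufferlists; this is inferred structure, not the actual completion code):

#include <string>
#include <vector>
#include <cassert>

struct Piece { bool hit; std::string data; };  // data carries size for misses

std::string assemble(const std::vector<Piece> &pieces,
                     const std::string &miss_bytes) {
  std::string out;
  size_t miss_off = 0;
  for (const auto &p : pieces) {
    if (p.hit) {
      out += p.data;                                   // served from cache
    } else {
      out += miss_bytes.substr(miss_off, p.data.size()); // served from image
      miss_off += p.data.size();
    }
  }
  return out;
}

int main() {
  // "BB" and "DD" were misses, fetched from the image as one buffer "BBDD".
  std::string r = assemble({{true,"AA"},{false,"__"},{true,"CC"},{false,"__"}},
                           "BBDD");
  assert(r == "AABBCCDD");
}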
null
ceph-main/src/librbd/cache/pwl/Request.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_CACHE_PWL_REQUEST_H #define CEPH_LIBRBD_CACHE_PWL_REQUEST_H #include "include/Context.h" #include "librbd/cache/pwl/Types.h" #include "librbd/cache/pwl/LogOperation.h" namespace librbd { class BlockGuardCell; namespace cache { namespace pwl { class GuardedRequestFunctionContext; struct WriteRequestResources { bool allocated = false; std::vector<WriteBufferAllocation> buffers; }; /** * A request that can be deferred in a BlockGuard to sequence * overlapping operations. * This is the custodian of the BlockGuard cell for this IO, and the * state information about the progress of this IO. This object lives * until the IO is persisted in all (live) log replicas. User request * may be completed from here before the IO persists. */ template <typename T> class C_BlockIORequest : public Context { public: T &pwl; io::Extents image_extents; bufferlist bl; int fadvise_flags; Context *user_req; /* User write request */ ExtentsSummary<io::Extents> image_extents_summary; bool detained = false; /* Detained in blockguard (overlapped with a prior IO) */ utime_t allocated_time; /* When allocation began */ C_BlockIORequest(T &pwl, const utime_t arrived, io::Extents &&extents, bufferlist&& bl, const int fadvise_flags, Context *user_req); ~C_BlockIORequest() override; C_BlockIORequest(const C_BlockIORequest&) = delete; C_BlockIORequest &operator=(const C_BlockIORequest&) = delete; void set_cell(BlockGuardCell *cell); BlockGuardCell *get_cell(void); void release_cell(); void complete_user_request(int r); void finish(int r); virtual void finish_req(int r) = 0; virtual bool alloc_resources() = 0; void deferred(); virtual void deferred_handler() = 0; virtual void dispatch() = 0; virtual void copy_cache() {}; virtual const char *get_name() const { return "C_BlockIORequest"; } uint64_t get_image_extents_size() { return image_extents.size(); } std::vector<WriteBufferAllocation>& get_resources_buffers() { return m_resources.buffers; } void set_allocated(bool allocated) { if (allocated) { m_resources.allocated = true; } else { m_resources.buffers.clear(); } } virtual void setup_buffer_resources( uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated, uint64_t *number_lanes, uint64_t *number_log_entries, uint64_t *number_unpublished_reserves) = 0; protected: utime_t m_arrived_time; utime_t m_dispatched_time; /* When dispatch began */ utime_t m_user_req_completed_time; std::atomic<bool> m_deferred = {false}; /* Deferred because this or a prior IO had to wait for write resources */ WriteRequestResources m_resources; private: std::atomic<bool> m_user_req_completed = {false}; std::atomic<bool> m_finish_called = {false}; std::atomic<bool> m_cell_released = {false}; BlockGuardCell* m_cell = nullptr; template <typename U> friend std::ostream &operator<<(std::ostream &os, const C_BlockIORequest<U> &req); }; /** * This is the custodian of the BlockGuard cell for this write. Block * guard is not released until the write persists everywhere (this is * how we guarantee to each log replica that they will never see * overlapping writes). 
*/ template <typename T> class C_WriteRequest : public C_BlockIORequest<T> { public: using C_BlockIORequest<T>::pwl; bool compare_succeeded = false; uint64_t *mismatch_offset; bufferlist cmp_bl; bufferlist read_bl; bool is_comp_and_write = false; std::unique_ptr<WriteLogOperationSet> op_set = nullptr; C_WriteRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents, bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock, PerfCounters *perfcounter, Context *user_req); C_WriteRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents, bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset, int fadvise_flags, ceph::mutex &lock, PerfCounters *perfcounter, Context *user_req); ~C_WriteRequest() override; void blockguard_acquired(GuardedRequestFunctionContext &guard_ctx); /* Common finish to plain write and compare-and-write (if it writes) */ void finish_req(int r) override; /* Compare and write will override this */ virtual void update_req_stats(utime_t &now); bool alloc_resources() override; void deferred_handler() override { } void dispatch() override; void copy_cache() override; virtual std::shared_ptr<WriteLogOperation> create_operation(uint64_t offset, uint64_t len); virtual void setup_log_operations(DeferredContexts &on_exit); bool append_write_request(std::shared_ptr<SyncPoint> sync_point); virtual void schedule_append(); const char *get_name() const override { return "C_WriteRequest"; } protected: using C_BlockIORequest<T>::m_resources; PerfCounters *m_perfcounter = nullptr; private: bool m_do_early_flush = false; std::atomic<int> m_appended = {0}; bool m_queued = false; ceph::mutex &m_lock; template <typename U> friend std::ostream &operator<<(std::ostream &os, const C_WriteRequest<U> &req); }; /** * This is the custodian of the BlockGuard cell for this * aio_flush. Block guard is released as soon as the new * sync point (if required) is created. Subsequent IOs can * proceed while this flush waits for prior IOs to complete * and any required sync points to be persisted. */ template <typename T> class C_FlushRequest : public C_BlockIORequest<T> { public: using C_BlockIORequest<T>::pwl; bool internal = false; std::shared_ptr<SyncPoint> to_append; C_FlushRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents, bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock, PerfCounters *perfcounter, Context *user_req); ~C_FlushRequest() override {} bool alloc_resources() override; void dispatch() override; const char *get_name() const override { return "C_FlushRequest"; } void setup_buffer_resources( uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated, uint64_t *number_lanes, uint64_t *number_log_entries, uint64_t *number_unpublished_reserves) override; private: std::shared_ptr<SyncPointLogOperation> op; ceph::mutex &m_lock; PerfCounters *m_perfcounter = nullptr; void finish_req(int r) override; void deferred_handler() override { m_perfcounter->inc(l_librbd_pwl_aio_flush_def, 1); } template <typename U> friend std::ostream &operator<<(std::ostream &os, const C_FlushRequest<U> &req); }; /** * This is the custodian of the BlockGuard cell for this discard. As in the * case of write, the block guard is not released until the discard persists * everywhere. 
*/ template <typename T> class C_DiscardRequest : public C_BlockIORequest<T> { public: using C_BlockIORequest<T>::pwl; std::shared_ptr<DiscardLogOperation> op; C_DiscardRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents, uint32_t discard_granularity_bytes, ceph::mutex &lock, PerfCounters *perfcounter, Context *user_req); ~C_DiscardRequest() override; void finish_req(int r) override {} bool alloc_resources() override; void deferred_handler() override { } void setup_log_operations(); void dispatch() override; void blockguard_acquired(GuardedRequestFunctionContext &guard_ctx); const char *get_name() const override { return "C_DiscardRequest"; } void setup_buffer_resources( uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated, uint64_t *number_lanes, uint64_t *number_log_entries, uint64_t *number_unpublished_reserves) override; private: uint32_t m_discard_granularity_bytes; ceph::mutex &m_lock; PerfCounters *m_perfcounter = nullptr; template <typename U> friend std::ostream &operator<<(std::ostream &os, const C_DiscardRequest<U> &req); }; /** * This is the custodian of the BlockGuard cell for this write same. * * A writesame allocates and persists a data buffer like a write, but the * data buffer is usually much shorter than the write same. */ template <typename T> class C_WriteSameRequest : public C_WriteRequest<T> { public: using C_BlockIORequest<T>::pwl; C_WriteSameRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents, bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock, PerfCounters *perfcounter, Context *user_req); ~C_WriteSameRequest() override; void update_req_stats(utime_t &now) override; std::shared_ptr<WriteLogOperation> create_operation(uint64_t offset, uint64_t len) override; const char *get_name() const override { return "C_WriteSameRequest"; } template<typename U> friend std::ostream &operator<<(std::ostream &os, const C_WriteSameRequest<U> &req); }; struct BlockGuardReqState { bool barrier = false; /* This is a barrier request */ bool current_barrier = false; /* This is the currently active barrier */ bool detained = false; bool queued = false; /* Queued for barrier */ friend std::ostream &operator<<(std::ostream &os, const BlockGuardReqState &r) { os << "barrier=" << r.barrier << ", current_barrier=" << r.current_barrier << ", detained=" << r.detained << ", queued=" << r.queued; return os; } }; class GuardedRequestFunctionContext : public Context { public: BlockGuardCell *cell = nullptr; BlockGuardReqState state; GuardedRequestFunctionContext(boost::function<void(GuardedRequestFunctionContext&)> &&callback) : m_callback(std::move(callback)){ } ~GuardedRequestFunctionContext(void) override { }; GuardedRequestFunctionContext(const GuardedRequestFunctionContext&) = delete; GuardedRequestFunctionContext &operator=(const GuardedRequestFunctionContext&) = delete; private: boost::function<void(GuardedRequestFunctionContext&)> m_callback; void finish(int r) override { ceph_assert(cell); m_callback(*this); } }; class GuardedRequest { public: const BlockExtent block_extent; GuardedRequestFunctionContext *guard_ctx; /* Work to do when guard on range obtained */ GuardedRequest(const BlockExtent block_extent, GuardedRequestFunctionContext *on_guard_acquire, bool barrier = false) : block_extent(block_extent), guard_ctx(on_guard_acquire) { guard_ctx->state.barrier = barrier; } friend std::ostream &operator<<(std::ostream &os, const GuardedRequest &r) { os << "guard_ctx->state=[" << r.guard_ctx->state << "], block_extent.block_start=" 
<< r.block_extent.block_start << ", block_extent.block_end=" << r.block_extent.block_end; return os; } }; } // namespace pwl } // namespace cache } // namespace librbd #endif // CEPH_LIBRBD_CACHE_PWL_REQUEST_H
11,409
30.519337
115
h
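BlockGuardReqState above distinguishes a barrier request, the currently active barrier, and requests detained or queued behind it. A deliberately simplified standalone model of the barrier rule (it ignores extent overlap and the draining of prior in-flight IO, which the real guard also enforces):

#include <deque>
#include <functional>
#include <cassert>

struct ToyGuard {
  bool barrier_in_progress = false;
  std::deque<std::function<void()>> queued;

  void submit(std::function<void()> fn, bool is_barrier) {
    if (barrier_in_progress) { queued.push_back(std::move(fn)); return; }
    if (is_barrier) barrier_in_progress = true;
    fn();
  }
  void barrier_complete() {
    barrier_in_progress = false;
    while (!queued.empty()) {       // release everything queued behind it
      auto fn = std::move(queued.front());
      queued.pop_front();
      fn();
    }
  }
};

int main() {
  ToyGuard g;
  int ran = 0;
  g.submit([&]{ ++ran; }, true);    // barrier runs, blocks later IO
  g.submit([&]{ ++ran; }, false);   // queued behind the barrier
  assert(ran == 1);
  g.barrier_complete();
  assert(ran == 2);                 // queued IO released
}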
null
ceph-main/src/librbd/cache/pwl/SyncPoint.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_RWL_SYNC_POINT_H
#define CEPH_LIBRBD_CACHE_RWL_SYNC_POINT_H

#include "librbd/ImageCtx.h"
#include "librbd/cache/pwl/LogEntry.h"
#include "librbd/cache/pwl/Types.h"

namespace librbd {
namespace cache {
namespace pwl {

class SyncPoint: public std::enable_shared_from_this<SyncPoint> {
public:
  std::shared_ptr<SyncPointLogEntry> log_entry;
  /* Use lock for earlier/later links */
  std::shared_ptr<SyncPoint> earlier_sync_point; /* NULL if earlier has completed */
  std::shared_ptr<SyncPoint> later_sync_point;
  bool appending = false;
  /* Signal these when this sync point is appending to the log, and its order
   * of appearance is guaranteed. One of these is a sub-operation of the
   * next sync point's m_prior_log_entries_persisted Gather. */
  std::vector<Context*> on_sync_point_appending;
  /* Signal these when this sync point is appended and persisted. User
   * aio_flush() calls are added to this. */
  std::vector<Context*> on_sync_point_persisted;

  SyncPoint(uint64_t sync_gen_num, CephContext *cct);
  ~SyncPoint();
  SyncPoint(const SyncPoint&) = delete;
  SyncPoint &operator=(const SyncPoint&) = delete;

  void persist_gather_activate();
  Context* persist_gather_new_sub();
  void persist_gather_set_finisher(Context *ctx);
  void prior_persisted_gather_activate();
  Context* prior_persisted_gather_new_sub();
  void prior_persisted_gather_set_finisher();
  void add_in_on_persisted_ctxs(Context* cxt);
  void add_in_on_appending_ctxs(Context* cxt);
  void setup_earlier_sync_point(std::shared_ptr<SyncPoint> sync_point,
                                uint64_t last_op_sequence_num);

private:
  CephContext *m_cct;
  bool m_append_scheduled = false;
  uint64_t m_final_op_sequence_num = 0;
  /* A sync point can't appear in the log until all the writes bearing
   * it and all the prior sync points have been appended and
   * persisted.
   *
   * Writes bearing this sync gen number and the prior sync point will be
   * sub-ops of this Gather. This sync point will not be appended until all
   * these complete to the point where their persist order is guaranteed. */
  C_Gather *m_prior_log_entries_persisted;
  /* The finisher for this will append the sync point to the log. The finisher
   * for m_prior_log_entries_persisted will be a sub-op of this. */
  C_Gather *m_sync_point_persist;
  int m_prior_log_entries_persisted_result = 0;
  bool m_prior_log_entries_persisted_complete = false;

  friend std::ostream &operator<<(std::ostream &os,
                                  const SyncPoint &p);
};

} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_RWL_SYNC_POINT_H
2,776
38.671429
84
h
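The two C_Gather members encode "this sync point may not append until every prior write has persisted": writes register sub-ops, and the finisher fires only once the gather is activated and all subs complete. A minimal stand-in for that gather pattern (illustrative; the real C_Gather is Context-based):

#include <functional>
#include <cassert>

class ToyGather {
  int pending = 0;
  bool activated = false;
  std::function<void()> finisher;
  void maybe_finish() { if (activated && pending == 0 && finisher) finisher(); }
public:
  explicit ToyGather(std::function<void()> fin) : finisher(std::move(fin)) {}
  std::function<void()> new_sub() {            // one per prior write
    ++pending;
    return [this] { --pending; maybe_finish(); };
  }
  void activate() { activated = true; maybe_finish(); }
};

int main() {
  bool appended = false;
  ToyGather prior_persisted([&] { appended = true; });  // would append the sync point
  auto write1 = prior_persisted.new_sub();
  auto write2 = prior_persisted.new_sub();
  prior_persisted.activate();
  write1();
  assert(!appended);   // write2 is still outstanding
  write2();
  assert(appended);    // all prior writes persisted; sync point may append
}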
null
ceph-main/src/librbd/cache/pwl/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_LIBRBD_CACHE_PWL_TYPES_H #define CEPH_LIBRBD_CACHE_PWL_TYPES_H #include "acconfig.h" #ifdef WITH_RBD_RWL #include "libpmemobj.h" #endif #include <vector> #include "librbd/BlockGuard.h" #include "librbd/io/Types.h" namespace ceph { class Formatter; } class Context; enum { l_librbd_pwl_first = 26500, // All read requests l_librbd_pwl_rd_req, // read requests l_librbd_pwl_rd_bytes, // bytes read l_librbd_pwl_rd_latency, // average req completion latency // Read requests completed from RWL (no misses) l_librbd_pwl_rd_hit_req, // read requests l_librbd_pwl_rd_hit_bytes, // bytes read l_librbd_pwl_rd_hit_latency, // average req completion latency // Read requests with hit and miss extents l_librbd_pwl_rd_part_hit_req, // read ops // Per SyncPoint's LogEntry number and write bytes distribution l_librbd_pwl_syncpoint_hist, // All write requests l_librbd_pwl_wr_req, // write requests l_librbd_pwl_wr_bytes, // bytes written l_librbd_pwl_wr_req_def, // write requests deferred for resources l_librbd_pwl_wr_req_def_lanes, // write requests deferred for lanes l_librbd_pwl_wr_req_def_log, // write requests deferred for log entries l_librbd_pwl_wr_req_def_buf, // write requests deferred for buffer space l_librbd_pwl_wr_req_overlap, // write requests detained for overlap l_librbd_pwl_wr_req_queued, // write requests queued for prior barrier // Write log operations (1 .. n per request that appends to the log) l_librbd_pwl_log_ops, // log append ops l_librbd_pwl_log_op_bytes, // average bytes written per log op /* Req and op average latencies to the beginning of and over various phases: +------------------------------+------+-------------------------------+ | Phase | Name | Description | +------------------------------+------+-------------------------------+ | Arrive at RWL | arr |Arrives as a request | +------------------------------+------+-------------------------------+ | Allocate resources | all |time spent in block guard for | | | |overlap sequencing occurs | | | |before this point | +------------------------------+------+-------------------------------+ | Dispatch | dis |Op lifetime begins here. 
time | | | |spent in allocation waiting for| | | |resources occurs before this | | | |point | +------------------------------+------+-------------------------------+ | Payload buffer persist and | buf |time spent queued for | |replicate | |replication occurs before here | +------------------------------+------+-------------------------------+ | Payload buffer persist | bufc |bufc - buf is just the persist | |complete | |time | +------------------------------+------+-------------------------------+ | Log append | app |time spent queued for append | | | |occurs before here | +------------------------------+------+-------------------------------+ | Append complete | appc |appc - app is just the time | | | |spent in the append operation | +------------------------------+------+-------------------------------+ | Complete | cmp |write persisted, replicated, | | | |and globally visible | +------------------------------+------+-------------------------------+ */ /* Request times */ l_librbd_pwl_req_arr_to_all_t, // arrival to allocation elapsed time - same as time deferred in block guard l_librbd_pwl_req_arr_to_dis_t, // arrival to dispatch elapsed time l_librbd_pwl_req_all_to_dis_t, // Time spent allocating or waiting to allocate resources l_librbd_pwl_wr_latency, // average req (persist) completion latency l_librbd_pwl_wr_latency_hist, // Histogram of write req (persist) completion latency vs. bytes written l_librbd_pwl_wr_caller_latency, // average req completion (to caller) latency /* Request times for requests that never waited for space*/ l_librbd_pwl_nowait_req_arr_to_all_t, // arrival to allocation elapsed time - same as time deferred in block guard l_librbd_pwl_nowait_req_arr_to_dis_t, // arrival to dispatch elapsed time l_librbd_pwl_nowait_req_all_to_dis_t, // Time spent allocating or waiting to allocate resources l_librbd_pwl_nowait_wr_latency, // average req (persist) completion latency l_librbd_pwl_nowait_wr_latency_hist, // Histogram of write req (persist) completion latency vs. bytes written l_librbd_pwl_nowait_wr_caller_latency, // average req completion (to caller) latency /* Log operation times */ l_librbd_pwl_log_op_alloc_t, // elapsed time of pmemobj_reserve() l_librbd_pwl_log_op_alloc_t_hist, // Histogram of elapsed time of pmemobj_reserve() l_librbd_pwl_log_op_dis_to_buf_t, // dispatch to buffer persist elapsed time l_librbd_pwl_log_op_dis_to_app_t, // dispatch to log append elapsed time l_librbd_pwl_log_op_dis_to_cmp_t, // dispatch to persist completion elapsed time l_librbd_pwl_log_op_dis_to_cmp_t_hist, // Histogram of dispatch to persist completion elapsed time l_librbd_pwl_log_op_buf_to_app_t, // data buf persist + append wait time l_librbd_pwl_log_op_buf_to_bufc_t,// data buf persist / replicate elapsed time l_librbd_pwl_log_op_buf_to_bufc_t_hist,// data buf persist time vs bytes histogram l_librbd_pwl_log_op_app_to_cmp_t, // log entry append + completion wait time l_librbd_pwl_log_op_app_to_appc_t, // log entry append / replicate elapsed time l_librbd_pwl_log_op_app_to_appc_t_hist, // log entry append time (vs. 
op bytes) histogram l_librbd_pwl_discard, l_librbd_pwl_discard_bytes, l_librbd_pwl_discard_latency, l_librbd_pwl_aio_flush, l_librbd_pwl_aio_flush_def, l_librbd_pwl_aio_flush_latency, l_librbd_pwl_ws, l_librbd_pwl_ws_bytes, // Bytes modified by write same, probably much larger than WS payload bytes l_librbd_pwl_ws_latency, l_librbd_pwl_cmp, l_librbd_pwl_cmp_bytes, l_librbd_pwl_cmp_latency, l_librbd_pwl_cmp_fails, l_librbd_pwl_internal_flush, l_librbd_pwl_writeback_latency, l_librbd_pwl_invalidate_cache, l_librbd_pwl_invalidate_discard_cache, l_librbd_pwl_append_tx_t, l_librbd_pwl_retire_tx_t, l_librbd_pwl_append_tx_t_hist, l_librbd_pwl_retire_tx_t_hist, l_librbd_pwl_last, }; enum { WRITE_LOG_CACHE_ENTRY_VALID = 1U << 0, /* if 0, this entry is free */ WRITE_LOG_CACHE_ENTRY_SYNC_POINT = 1U << 1, /* No data. No write sequence number. Marks sync point for this sync gen number */ WRITE_LOG_CACHE_ENTRY_SEQUENCED = 1U << 2, /* write sequence number is valid */ WRITE_LOG_CACHE_ENTRY_HAS_DATA = 1U << 3, /* write_data field is valid (else ignore) */ WRITE_LOG_CACHE_ENTRY_DISCARD = 1U << 4, /* has_data will be 0 if this is a discard */ WRITE_LOG_CACHE_ENTRY_WRITESAME = 1U << 5, /* ws_datalen indicates length of data at write_bytes */ }; namespace librbd { namespace cache { namespace pwl { class ImageExtentBuf; const int IN_FLIGHT_FLUSH_WRITE_LIMIT = 64; const int IN_FLIGHT_FLUSH_BYTES_LIMIT = (1 * 1024 * 1024); /* Limit work between sync points */ const uint64_t MAX_WRITES_PER_SYNC_POINT = 256; const uint64_t MAX_BYTES_PER_SYNC_POINT = (1024 * 1024 * 8); const uint32_t MIN_WRITE_ALLOC_SIZE = 512; const uint32_t MIN_WRITE_ALLOC_SSD_SIZE = 4096; const uint32_t LOG_STATS_INTERVAL_SECONDS = 5; /**** Write log entries ****/ const unsigned long int MAX_ALLOC_PER_TRANSACTION = 8; const unsigned long int MAX_FREE_PER_TRANSACTION = 1; const unsigned int MAX_CONCURRENT_WRITES = (1024 * 1024); const uint64_t DEFAULT_POOL_SIZE = 1u<<30; const uint64_t MIN_POOL_SIZE = DEFAULT_POOL_SIZE; const uint64_t POOL_SIZE_ALIGN = 1 << 20; constexpr double USABLE_SIZE = (7.0 / 10); const uint64_t BLOCK_ALLOC_OVERHEAD_BYTES = 16; const uint8_t RWL_LAYOUT_VERSION = 1; const uint8_t SSD_LAYOUT_VERSION = 1; const uint64_t MAX_LOG_ENTRIES = (1024 * 1024); const double AGGRESSIVE_RETIRE_HIGH_WATER = 0.75; const double RETIRE_HIGH_WATER = 0.50; const double RETIRE_LOW_WATER = 0.40; const int RETIRE_BATCH_TIME_LIMIT_MS = 250; const uint64_t CONTROL_BLOCK_MAX_LOG_ENTRIES = 32; const uint64_t SPAN_MAX_DATA_LEN = (16 * 1024 * 1024); /* offset of ring on SSD */ const uint64_t DATA_RING_BUFFER_OFFSET = 8192; /* Defer a set of Contexts until destruct/exit. Used for deferring * work on a given thread until a required lock is dropped. */ class DeferredContexts { private: std::vector<Context*> contexts; public: ~DeferredContexts(); void add(Context* ctx); }; /* Pmem structures */ #ifdef WITH_RBD_RWL POBJ_LAYOUT_BEGIN(rbd_pwl); POBJ_LAYOUT_ROOT(rbd_pwl, struct WriteLogPoolRoot); POBJ_LAYOUT_TOID(rbd_pwl, uint8_t); POBJ_LAYOUT_TOID(rbd_pwl, struct WriteLogCacheEntry); POBJ_LAYOUT_END(rbd_pwl); #endif struct WriteLogCacheEntry { uint64_t sync_gen_number = 0; uint64_t write_sequence_number = 0; uint64_t image_offset_bytes; uint64_t write_bytes; #ifdef WITH_RBD_RWL TOID(uint8_t) write_data; #endif #ifdef WITH_RBD_SSD_CACHE uint64_t write_data_pos = 0; /* SSD data offset */ #endif uint8_t flags = 0; uint32_t ws_datalen = 0; /* Length of data buffer (writesame only) */ uint32_t entry_index = 0; /* For debug consistency check. 
Can be removed if * we need the space */ WriteLogCacheEntry(uint64_t image_offset_bytes=0, uint64_t write_bytes=0) : image_offset_bytes(image_offset_bytes), write_bytes(write_bytes) {} BlockExtent block_extent(); uint64_t get_offset_bytes(); uint64_t get_write_bytes(); bool is_entry_valid() const { return flags & WRITE_LOG_CACHE_ENTRY_VALID; } bool is_sync_point() const { return flags & WRITE_LOG_CACHE_ENTRY_SYNC_POINT; } bool is_sequenced() const { return flags & WRITE_LOG_CACHE_ENTRY_SEQUENCED; } bool has_data() const { return flags & WRITE_LOG_CACHE_ENTRY_HAS_DATA; } bool is_discard() const { return flags & WRITE_LOG_CACHE_ENTRY_DISCARD; } bool is_writesame() const { return flags & WRITE_LOG_CACHE_ENTRY_WRITESAME; } bool is_write() const { /* Log entry is a basic write */ return !is_sync_point() && !is_discard() && !is_writesame(); } bool is_writer() const { /* Log entry is any type that writes data */ return is_write() || is_discard() || is_writesame(); } void set_entry_valid(bool flag) { if (flag) { flags |= WRITE_LOG_CACHE_ENTRY_VALID; } else { flags &= ~WRITE_LOG_CACHE_ENTRY_VALID; } } void set_sync_point(bool flag) { if (flag) { flags |= WRITE_LOG_CACHE_ENTRY_SYNC_POINT; } else { flags &= ~WRITE_LOG_CACHE_ENTRY_SYNC_POINT; } } void set_sequenced(bool flag) { if (flag) { flags |= WRITE_LOG_CACHE_ENTRY_SEQUENCED; } else { flags &= ~WRITE_LOG_CACHE_ENTRY_SEQUENCED; } } void set_has_data(bool flag) { if (flag) { flags |= WRITE_LOG_CACHE_ENTRY_HAS_DATA; } else { flags &= ~WRITE_LOG_CACHE_ENTRY_HAS_DATA; } } void set_discard(bool flag) { if (flag) { flags |= WRITE_LOG_CACHE_ENTRY_DISCARD; } else { flags &= ~WRITE_LOG_CACHE_ENTRY_DISCARD; } } void set_writesame(bool flag) { if (flag) { flags |= WRITE_LOG_CACHE_ENTRY_WRITESAME; } else { flags &= ~WRITE_LOG_CACHE_ENTRY_WRITESAME; } } friend std::ostream& operator<<(std::ostream& os, const WriteLogCacheEntry &entry); #ifdef WITH_RBD_SSD_CACHE DENC(WriteLogCacheEntry, v, p) { DENC_START(1, 1, p); denc(v.sync_gen_number, p); denc(v.write_sequence_number, p); denc(v.image_offset_bytes, p); denc(v.write_bytes, p); denc(v.write_data_pos, p); denc(v.flags, p); denc(v.ws_datalen, p); denc(v.entry_index, p); DENC_FINISH(p); } #endif void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<WriteLogCacheEntry*>& ls); }; struct WriteLogPoolRoot { #ifdef WITH_RBD_RWL union { struct { uint8_t layout_version; }; uint64_t _u64; } header; TOID(struct WriteLogCacheEntry) log_entries; /* contiguous array of log entries */ #endif #ifdef WITH_RBD_SSD_CACHE uint64_t layout_version = 0; uint64_t cur_sync_gen = 0; /* TODO: remove it when changing disk format */ #endif uint64_t pool_size; uint64_t flushed_sync_gen; /* All writing entries with this or a lower * sync gen number are flushed. 
*/ uint32_t block_size; uint32_t num_log_entries; uint64_t first_free_entry; /* The free entry following the latest valid * entry, which is going to be written */ uint64_t first_valid_entry; /* The oldest valid entry to be retired */ #ifdef WITH_RBD_SSD_CACHE DENC(WriteLogPoolRoot, v, p) { DENC_START(1, 1, p); denc(v.layout_version, p); denc(v.cur_sync_gen, p); denc(v.pool_size, p); denc(v.flushed_sync_gen, p); denc(v.block_size, p); denc(v.num_log_entries, p); denc(v.first_free_entry, p); denc(v.first_valid_entry, p); DENC_FINISH(p); } #endif void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<WriteLogPoolRoot*>& ls); }; struct WriteBufferAllocation { unsigned int allocation_size = 0; #ifdef WITH_RBD_RWL pobj_action buffer_alloc_action; TOID(uint8_t) buffer_oid = OID_NULL; #endif bool allocated = false; utime_t allocation_lat; }; static inline io::Extent image_extent(const BlockExtent& block_extent) { return io::Extent(block_extent.block_start, block_extent.block_end - block_extent.block_start); } template <typename ExtentsType> class ExtentsSummary { public: uint64_t total_bytes; uint64_t first_image_byte; uint64_t last_image_byte; explicit ExtentsSummary(const ExtentsType &extents); friend std::ostream &operator<<(std::ostream &os, const ExtentsSummary &s) { os << "total_bytes=" << s.total_bytes << ", first_image_byte=" << s.first_image_byte << ", last_image_byte=" << s.last_image_byte; return os; } BlockExtent block_extent() { return BlockExtent(first_image_byte, last_image_byte); } io::Extent image_extent() { return librbd::cache::pwl::image_extent(block_extent()); } }; io::Extent whole_volume_extent(); BlockExtent block_extent(const io::Extent& image_extent); Context * override_ctx(int r, Context *ctx); class ImageExtentBuf : public io::Extent { public: bufferlist m_bl; bool need_to_truncate; int truncate_offset; bool writesame; ImageExtentBuf() {} ImageExtentBuf(io::Extent extent, bool need_to_truncate = false, uint64_t truncate_offset = 0, bool writesame = false) : io::Extent(extent), need_to_truncate(need_to_truncate), truncate_offset(truncate_offset), writesame(writesame) {} ImageExtentBuf(io::Extent extent, bufferlist bl, bool need_to_truncate = false, uint64_t truncate_offset = 0, bool writesame = false) : io::Extent(extent), m_bl(bl), need_to_truncate(need_to_truncate), truncate_offset(truncate_offset), writesame(writesame) {} }; std::string unique_lock_name(const std::string &name, void *address); } // namespace pwl } // namespace cache } // namespace librbd #ifdef WITH_RBD_SSD_CACHE WRITE_CLASS_DENC(librbd::cache::pwl::WriteLogCacheEntry) WRITE_CLASS_DENC(librbd::cache::pwl::WriteLogPoolRoot) #endif #endif // CEPH_LIBRBD_CACHE_PWL_TYPES_H
16,288
35.522422
118
h
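DeferredContexts above is documented as "defer a set of Contexts until destruct/exit", i.e. collect completions while a lock is held and run them only after the lock scope ends. A sketch of the idiom with std::function standing in for Context* (an assumption of this sketch; the real class completes Contexts, likely with a result code):

#include <vector>
#include <functional>
#include <mutex>

class ToyDeferredContexts {
  std::vector<std::function<void()>> contexts;
public:
  ~ToyDeferredContexts() { for (auto &c : contexts) c(); }
  void add(std::function<void()> c) { contexts.push_back(std::move(c)); }
};

void example(std::mutex &lock) {
  ToyDeferredContexts later;          // destroyed after the unlock below
  {
    std::lock_guard<std::mutex> l(lock);
    // ... mutate shared state under the lock ...
    later.add([] { /* user completion; must not run while lock is held */ });
  }                                   // lock dropped here
}                                     // deferred completions fire here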
null
ceph-main/src/librbd/cache/pwl/rwl/Builder.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_BUILDER_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_BUILDER_H

#include <iostream>
#include "LogEntry.h"
#include "ReadRequest.h"
#include "Request.h"
#include "LogOperation.h"

#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/pwl/Builder.h"

namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {

template <typename T>
class Builder : public pwl::Builder<T> {
public:
  std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes) override {
    return std::make_shared<WriteLogEntry>(image_offset_bytes, write_bytes);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes) override {
    return std::make_shared<WriteLogEntry>(
        sync_point_entry, image_offset_bytes, write_bytes);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) override {
    return std::make_shared<WriteSameLogEntry>(
        image_offset_bytes, write_bytes, data_length);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) override {
    return std::make_shared<WriteSameLogEntry>(
        sync_point_entry, image_offset_bytes, write_bytes, data_length);
  }
  pwl::C_WriteRequest<T> *create_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req);
  }
  pwl::C_WriteSameRequest<T> *create_writesame_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_WriteSameRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req);
  }
  pwl::C_WriteRequest<T> *create_comp_and_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new rwl::C_CompAndWriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req);
  }
  std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> write_log_entry) {
    return std::make_shared<WriteLogOperation>(
        set, image_offset_bytes, write_bytes, cct, write_log_entry);
  }
  std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t data_len, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> writesame_log_entry) {
    return std::make_shared<WriteLogOperation>(
        set, image_offset_bytes, write_bytes, data_len, cct,
        writesame_log_entry);
  }
  std::shared_ptr<pwl::DiscardLogOperation> create_discard_log_operation(
      std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t discard_granularity_bytes,
      utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct) {
    return std::make_shared<DiscardLogOperation>(
        sync_point, image_offset_bytes, write_bytes,
        discard_granularity_bytes, dispatch_time, perfcounter, cct);
  }
  C_ReadRequest *create_read_request(CephContext *cct, utime_t arrived,
                                     PerfCounters *perfcounter,
                                     ceph::bufferlist *bl,
                                     Context *on_finish) {
    return new C_ReadRequest(cct, arrived, perfcounter, bl, on_finish);
  }
};

} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_RWL_BUILDER_H
4,540
41.046296
79
h
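The rwl::Builder above is one concrete implementation of the abstract pwl::Builder factory: the generic write-log code constructs entries, operations, and requests only through virtual create_*() methods, so the same logic can be backed by either the pmem (rwl) or SSD (ssd) types. A minimal, self-contained sketch of that factory pattern follows; every name in it is a hypothetical stand-in, not an actual librbd class.

#include <iostream>
#include <memory>

// Hypothetical stand-ins for pwl::WriteLogEntry and its backend variants.
struct Entry {
  virtual ~Entry() = default;
  virtual const char* backend() const = 0;
};
struct RwlEntry : Entry { const char* backend() const override { return "rwl"; } };
struct SsdEntry : Entry { const char* backend() const override { return "ssd"; } };

// Abstract factory, analogous in spirit to pwl::Builder<T>.
struct Builder {
  virtual ~Builder() = default;
  virtual std::shared_ptr<Entry> create_entry() = 0;
};
struct RwlBuilder : Builder {
  std::shared_ptr<Entry> create_entry() override { return std::make_shared<RwlEntry>(); }
};
struct SsdBuilder : Builder {
  std::shared_ptr<Entry> create_entry() override { return std::make_shared<SsdEntry>(); }
};

// Generic code (cf. AbstractWriteLog) only ever talks to the Builder interface.
void log_one(Builder& b) { std::cout << b.create_entry()->backend() << "\n"; }

int main() {
  RwlBuilder rwl;
  SsdBuilder ssd;
  log_one(rwl);  // prints "rwl"
  log_one(ssd);  // prints "ssd"
  return 0;
}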
null
ceph-main/src/librbd/cache/pwl/rwl/LogEntry.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_LOG_ENTRY_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_LOG_ENTRY_H

#include "librbd/cache/pwl/LogEntry.h"

namespace librbd {
namespace cache {
class ImageWritebackInterface;
namespace pwl {
namespace rwl {

class WriteLogEntry : public pwl::WriteLogEntry {
public:
  WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
                uint64_t image_offset_bytes, uint64_t write_bytes)
    : pwl::WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes) {}
  WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes)
    : pwl::WriteLogEntry(image_offset_bytes, write_bytes) {}
  WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
                uint64_t image_offset_bytes, uint64_t write_bytes,
                uint32_t data_length)
    : pwl::WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes,
                         data_length) {}
  WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes,
                uint32_t data_length)
    : pwl::WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
  ~WriteLogEntry() {}
  WriteLogEntry(const WriteLogEntry&) = delete;
  WriteLogEntry &operator=(const WriteLogEntry&) = delete;

  void writeback(librbd::cache::ImageWritebackInterface &image_writeback,
                 Context *ctx) override;
  void init_cache_bp() override;
  void init_bl(buffer::ptr &bp, buffer::list &bl) override;
  void init_cache_buffer(
      std::vector<WriteBufferAllocation>::iterator allocation) override;
  buffer::list &get_cache_bl() override;
  void copy_cache_bl(bufferlist *out_bl) override;
  unsigned int reader_count() const override;
};

class WriteSameLogEntry : public WriteLogEntry {
public:
  WriteSameLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
                    uint64_t image_offset_bytes, uint64_t write_bytes,
                    uint32_t data_length)
    : WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes,
                    data_length) {}
  WriteSameLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes,
                    uint32_t data_length)
    : WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
  ~WriteSameLogEntry() {}
  WriteSameLogEntry(const WriteSameLogEntry&) = delete;
  WriteSameLogEntry &operator=(const WriteSameLogEntry&) = delete;

  void writeback(librbd::cache::ImageWritebackInterface &image_writeback,
                 Context *ctx) override;
};

} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_RWL_LOG_ENTRY_H
2,712
38.318841
78
h
null
ceph-main/src/librbd/cache/pwl/rwl/LogOperation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_LOG_OPERATION_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_LOG_OPERATION_H

#include "librbd/cache/pwl/LogOperation.h"

namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {

class WriteLogOperation : public pwl::WriteLogOperation {
public:
  WriteLogOperation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> write_log_entry)
    : pwl::WriteLogOperation(set, image_offset_bytes, write_bytes, cct,
                             write_log_entry) {}

  WriteLogOperation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t data_len, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> writesame_log_entry)
    : pwl::WriteLogOperation(set, image_offset_bytes, write_bytes, cct,
                             writesame_log_entry) {}

  void copy_bl_to_cache_buffer(
      std::vector<WriteBufferAllocation>::iterator allocation) override;
};

class DiscardLogOperation : public pwl::DiscardLogOperation {
public:
  DiscardLogOperation(
      std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t discard_granularity_bytes,
      utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct)
    : pwl::DiscardLogOperation(sync_point, image_offset_bytes, write_bytes,
                               discard_granularity_bytes, dispatch_time,
                               perfcounter, cct) {}

  void init_op(
      uint64_t current_sync_gen, bool persist_on_flush,
      uint64_t last_op_sequence_num, Context *write_persist,
      Context *write_append) override;
};

} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_RWL_LOG_OPERATION_H
1,957
33.964286
76
h
null
ceph-main/src/librbd/cache/pwl/rwl/ReadRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_READ_REQUEST_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_READ_REQUEST_H

#include "librbd/cache/pwl/ReadRequest.h"

namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {

typedef std::vector<pwl::ImageExtentBuf> ImageExtentBufs;

class C_ReadRequest : public pwl::C_ReadRequest {
protected:
  using pwl::C_ReadRequest::m_cct;
  using pwl::C_ReadRequest::m_on_finish;
  using pwl::C_ReadRequest::m_out_bl;
  using pwl::C_ReadRequest::m_arrived_time;
  using pwl::C_ReadRequest::m_perfcounter;
public:
  C_ReadRequest(CephContext *cct, utime_t arrived, PerfCounters *perfcounter,
                bufferlist *out_bl, Context *on_finish)
    : pwl::C_ReadRequest(cct, arrived, perfcounter, out_bl, on_finish) {}
  void finish(int r) override;
};

} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_RWL_READ_REQUEST_H
1,000
27.6
117
h
null
ceph-main/src/librbd/cache/pwl/rwl/Request.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_RWL_REQUEST_H
#define CEPH_LIBRBD_CACHE_RWL_REQUEST_H

#include "librbd/cache/pwl/Request.h"

namespace librbd {
class BlockGuardCell;

namespace cache {
namespace pwl {
namespace rwl {

template <typename T>
class C_WriteRequest : public pwl::C_WriteRequest<T> {
public:
  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req) {}

  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req) {}

protected:
  // Plain writes will allocate one buffer per request extent
  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};

template <typename T>
class C_CompAndWriteRequest : public C_WriteRequest<T> {
public:
  C_CompAndWriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req) {}

  const char *get_name() const override {
    return "C_CompAndWriteRequest";
  }
  template <typename U>
  friend std::ostream &operator<<(std::ostream &os,
                                  const C_CompAndWriteRequest<U> &req);
};

template <typename T>
class C_WriteSameRequest : public pwl::C_WriteSameRequest<T> {
public:
  C_WriteSameRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteSameRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req) {}

  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};

} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_RWL_REQUEST_H
3,097
33.043956
77
h
null
ceph-main/src/librbd/cache/pwl/rwl/WriteLog.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_REPLICATED_WRITE_LOG
#define CEPH_LIBRBD_CACHE_REPLICATED_WRITE_LOG

#include <functional>
#include <libpmemobj.h>
#include <list>
#include "common/Timer.h"
#include "common/RWLock.h"
#include "common/WorkQueue.h"
#include "common/AsyncOpTracker.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/Utils.h"
#include "librbd/BlockGuard.h"
#include "librbd/cache/Types.h"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#include "librbd/cache/pwl/LogMap.h"
#include "librbd/cache/pwl/LogOperation.h"
#include "librbd/cache/pwl/Request.h"
#include "librbd/cache/pwl/rwl/Builder.h"

class Context;

namespace librbd {

struct ImageCtx;

namespace cache {
namespace pwl {
namespace rwl {

template <typename ImageCtxT>
class WriteLog : public AbstractWriteLog<ImageCtxT> {
public:
  WriteLog(
      ImageCtxT &image_ctx,
      librbd::cache::pwl::ImageCacheState<ImageCtxT>* cache_state,
      ImageWritebackInterface& image_writeback,
      plugin::Api<ImageCtxT>& plugin_api);
  ~WriteLog();
  WriteLog(const WriteLog&) = delete;
  WriteLog &operator=(const WriteLog&) = delete;

  typedef io::Extent Extent;
  using This = AbstractWriteLog<ImageCtxT>;
  using C_WriteRequestT = pwl::C_WriteRequest<This>;
  using C_WriteSameRequestT = pwl::C_WriteSameRequest<This>;

  void copy_bl_to_buffer(
      WriteRequestResources *resources,
      std::unique_ptr<WriteLogOperationSet> &op_set) override;
  void complete_user_request(Context *&user_req, int r) override;

private:
  using C_BlockIORequestT = pwl::C_BlockIORequest<This>;
  using C_FlushRequestT = pwl::C_FlushRequest<This>;
  using C_DiscardRequestT = pwl::C_DiscardRequest<This>;

  PMEMobjpool *m_log_pool = nullptr;
  Builder<This> *m_builderobj;
  const char* m_pwl_pool_layout_name;
  const uint64_t MAX_EXTENT_SIZE = 1048576;

  Builder<This>* create_builder();
  void remove_pool_file();
  void load_existing_entries(pwl::DeferredContexts &later);
  void alloc_op_log_entries(pwl::GenericLogOperations &ops);
  int append_op_log_entries(pwl::GenericLogOperations &ops);
  void flush_then_append_scheduled_ops(void);
  void enlist_op_flusher();
  void flush_op_log_entries(pwl::GenericLogOperationsVector &ops);
  template <typename V>
  void flush_pmem_buffer(V& ops);
  void inc_allocated_cached_bytes(
      std::shared_ptr<pwl::GenericLogEntry> log_entry) override;

protected:
  using AbstractWriteLog<ImageCtxT>::m_lock;
  using AbstractWriteLog<ImageCtxT>::m_log_entries;
  using AbstractWriteLog<ImageCtxT>::m_image_ctx;
  using AbstractWriteLog<ImageCtxT>::m_perfcounter;
  using AbstractWriteLog<ImageCtxT>::m_ops_to_flush;
  using AbstractWriteLog<ImageCtxT>::m_cache_state;
  using AbstractWriteLog<ImageCtxT>::m_first_free_entry;
  using AbstractWriteLog<ImageCtxT>::m_first_valid_entry;

  void process_work() override;
  void schedule_append_ops(pwl::GenericLogOperations &ops,
                           C_BlockIORequestT *req) override;
  void append_scheduled_ops(void) override;
  void reserve_cache(C_BlockIORequestT *req, bool &alloc_succeeds,
                     bool &no_space) override;
  void collect_read_extents(
      uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, uint64_t entry_hit_length,
      Extent hit_extent, pwl::C_ReadRequest *read_ctx) override;
  void complete_read(
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, Context *ctx) override;
  bool retire_entries(const unsigned long int frees_per_tx) override;
  void persist_last_flushed_sync_gen() override;
  bool alloc_resources(C_BlockIORequestT *req) override;
  void schedule_flush_and_append(
      pwl::GenericLogOperationsVector &ops) override;
  void setup_schedule_append(
      pwl::GenericLogOperationsVector &ops, bool do_early_flush,
      C_BlockIORequestT *req) override;
  void construct_flush_entries(pwl::GenericLogEntries entries_to_flush,
                               DeferredContexts &post_unlock,
                               bool has_write_entry) override;
  bool initialize_pool(Context *on_finish,
                       pwl::DeferredContexts &later) override;
  void write_data_to_buffer(
      std::shared_ptr<pwl::WriteLogEntry> ws_entry,
      pwl::WriteLogCacheEntry *pmem_entry) override;
  uint64_t get_max_extent() override {
    return MAX_EXTENT_SIZE;
  }
};

} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd

extern template class librbd::cache::pwl::rwl::WriteLog<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_REPLICATED_WRITE_LOG
4,695
36.568
96
h
null
ceph-main/src/librbd/cache/pwl/ssd/Builder.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_BUILDER_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_BUILDER_H

#include <iostream>
#include "LogEntry.h"
#include "ReadRequest.h"
#include "Request.h"
#include "LogOperation.h"

#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/pwl/Builder.h"

namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {

template <typename T>
class Builder : public pwl::Builder<T> {
public:
  std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes) override {
    return std::make_shared<WriteLogEntry>(image_offset_bytes, write_bytes);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes) override {
    return std::make_shared<WriteLogEntry>(
        sync_point_entry, image_offset_bytes, write_bytes);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) override {
    return std::make_shared<WriteSameLogEntry>(
        image_offset_bytes, write_bytes, data_length);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) override {
    return std::make_shared<WriteSameLogEntry>(
        sync_point_entry, image_offset_bytes, write_bytes, data_length);
  }
  pwl::C_WriteRequest<T> *create_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req);
  }
  pwl::C_WriteSameRequest<T> *create_writesame_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_WriteSameRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req);
  }
  pwl::C_WriteRequest<T> *create_comp_and_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_CompAndWriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req);
  }
  std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> write_log_entry) {
    return std::make_shared<WriteLogOperation>(
        set, image_offset_bytes, write_bytes, cct, write_log_entry);
  }
  std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t data_len, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> writesame_log_entry) {
    return std::make_shared<WriteLogOperation>(
        set, image_offset_bytes, write_bytes, data_len, cct,
        writesame_log_entry);
  }
  std::shared_ptr<pwl::DiscardLogOperation> create_discard_log_operation(
      std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t discard_granularity_bytes,
      utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct) {
    return std::make_shared<DiscardLogOperation>(
        sync_point, image_offset_bytes, write_bytes,
        discard_granularity_bytes, dispatch_time, perfcounter, cct);
  }
  C_ReadRequest *create_read_request(CephContext *cct, utime_t arrived,
                                     PerfCounters *perfcounter,
                                     ceph::bufferlist *bl,
                                     Context *on_finish) {
    return new C_ReadRequest(cct, arrived, perfcounter, bl, on_finish);
  }
};

} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_SSD_BUILDER_H
4,536
40.623853
79
h
null
ceph-main/src/librbd/cache/pwl/ssd/LogEntry.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_LOG_ENTRY_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_LOG_ENTRY_H

#include "librbd/cache/pwl/LogEntry.h"

namespace librbd {
namespace cache {
class ImageWritebackInterface;
namespace pwl {
namespace ssd {

class WriteLogEntry : public pwl::WriteLogEntry {
public:
  WriteLogEntry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes)
    : pwl::WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes) {}
  WriteLogEntry(
      uint64_t image_offset_bytes, uint64_t write_bytes)
    : pwl::WriteLogEntry(image_offset_bytes, write_bytes) {}
  WriteLogEntry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : pwl::WriteLogEntry(sync_point_entry, image_offset_bytes,
                         write_bytes, data_length) {}
  WriteLogEntry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : pwl::WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
  ~WriteLogEntry() {}
  WriteLogEntry(const WriteLogEntry&) = delete;
  WriteLogEntry &operator=(const WriteLogEntry&) = delete;

  void writeback_bl(librbd::cache::ImageWritebackInterface &image_writeback,
                    Context *ctx, ceph::bufferlist &&bl) override;
  void init_cache_bl(bufferlist &src_bl, uint64_t off,
                     uint64_t len) override;
  buffer::list &get_cache_bl() override;
  void copy_cache_bl(bufferlist *out) override;
  void remove_cache_bl() override;
  unsigned int get_aligned_data_size() const override;
  void inc_bl_refs() { bl_refs++; };
  void dec_bl_refs() { bl_refs--; };
  unsigned int reader_count() const override {
    return bl_refs;
  }
};

class WriteSameLogEntry : public WriteLogEntry {
public:
  WriteSameLogEntry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : WriteLogEntry(sync_point_entry, image_offset_bytes,
                    write_bytes, data_length) {}
  WriteSameLogEntry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
  ~WriteSameLogEntry() {}
  WriteSameLogEntry(const WriteSameLogEntry&) = delete;
  WriteSameLogEntry &operator=(const WriteSameLogEntry&) = delete;

  void writeback_bl(librbd::cache::ImageWritebackInterface &image_writeback,
                    Context *ctx, ceph::bufferlist &&bl) override;
};

} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_SSD_LOG_ENTRY_H
2,804
35.907895
78
h
null
ceph-main/src/librbd/cache/pwl/ssd/LogOperation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_LOG_OPERATION_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_LOG_OPERATION_H

#include "librbd/cache/pwl/LogOperation.h"

namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {

class DiscardLogOperation : public pwl::DiscardLogOperation {
public:
  DiscardLogOperation(
      std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t discard_granularity_bytes,
      utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct)
    : pwl::DiscardLogOperation(sync_point, image_offset_bytes, write_bytes,
                               discard_granularity_bytes, dispatch_time,
                               perfcounter, cct) {}

  void init_op(
      uint64_t current_sync_gen, bool persist_on_flush,
      uint64_t last_op_sequence_num, Context *write_persist,
      Context *write_append) override;
};

} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_SSD_LOG_OPERATION_H
1,131
30.444444
75
h
null
ceph-main/src/librbd/cache/pwl/ssd/ReadRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_READ_REQUEST_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_READ_REQUEST_H

#include "librbd/cache/pwl/ReadRequest.h"

namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {

typedef std::vector<pwl::ImageExtentBuf> ImageExtentBufs;

class C_ReadRequest : public pwl::C_ReadRequest {
protected:
  using pwl::C_ReadRequest::m_cct;
  using pwl::C_ReadRequest::m_on_finish;
  using pwl::C_ReadRequest::m_out_bl;
  using pwl::C_ReadRequest::m_arrived_time;
  using pwl::C_ReadRequest::m_perfcounter;
public:
  C_ReadRequest(CephContext *cct, utime_t arrived, PerfCounters *perfcounter,
                bufferlist *out_bl, Context *on_finish)
    : pwl::C_ReadRequest(cct, arrived, perfcounter, out_bl, on_finish) {}
  void finish(int r) override;
};

} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_PWL_SSD_READ_REQUEST_H
1,000
27.6
117
h
null
ceph-main/src/librbd/cache/pwl/ssd/Request.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_SSD_REQUEST_H
#define CEPH_LIBRBD_CACHE_SSD_REQUEST_H

#include "librbd/cache/pwl/Request.h"

namespace librbd {
class BlockGuardCell;

namespace cache {
namespace pwl {

template<typename T>
class AbstractWriteLog;

namespace ssd {

template <typename T>
class C_WriteRequest : public pwl::C_WriteRequest<T> {
public:
  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req) {}

  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req) {}

protected:
  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};

template <typename T>
class C_CompAndWriteRequest : public C_WriteRequest<T> {
public:
  C_CompAndWriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req) {}

  const char *get_name() const override {
    return "C_CompAndWriteRequest";
  }
  template <typename U>
  friend std::ostream &operator<<(std::ostream &os,
                                  const C_CompAndWriteRequest<U> &req);
};

template <typename T>
class C_WriteSameRequest : public pwl::C_WriteSameRequest<T> {
public:
  C_WriteSameRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteSameRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req) {}

  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};

} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd

#endif // CEPH_LIBRBD_CACHE_SSD_REQUEST_H
3,081
32.139785
77
h
null
ceph-main/src/librbd/cache/pwl/ssd/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_SSD_TYPES_H
#define CEPH_LIBRBD_CACHE_SSD_TYPES_H

#include "acconfig.h"

#include "librbd/io/Types.h"
#include "librbd/cache/pwl/Types.h"

namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {

struct SuperBlock {
  WriteLogPoolRoot root;

  DENC(SuperBlock, v, p) {
    DENC_START(1, 1, p);
    denc(v.root, p);
    DENC_FINISH(p);
  }

  void dump(Formatter *f) const {
    f->dump_object("super", root);
  }

  static void generate_test_instances(std::list<SuperBlock*>& ls) {
    ls.push_back(new SuperBlock());
    ls.push_back(new SuperBlock());
    ls.back()->root.layout_version = 3;
    ls.back()->root.cur_sync_gen = 1;
    ls.back()->root.pool_size = 10737418240;
    ls.back()->root.flushed_sync_gen = 1;
    ls.back()->root.block_size = 4096;
    ls.back()->root.num_log_entries = 0;
    ls.back()->root.first_free_entry = 30601;
    ls.back()->root.first_valid_entry = 2;
  }
};

} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd

WRITE_CLASS_DENC(librbd::cache::pwl::ssd::SuperBlock)

#endif // CEPH_LIBRBD_CACHE_SSD_TYPES_H
1,221
22.5
70
h
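SuperBlock wraps the on-disk WriteLogPoolRoot in Ceph's denc framework, so WRITE_CLASS_DENC makes the usual encode/decode free functions available for it. A rough roundtrip sketch follows; it assumes it is compiled inside the Ceph tree (where bufferlist and the denc machinery exist), and the field values are simply borrowed from generate_test_instances() above.

#include "librbd/cache/pwl/ssd/Types.h"
#include "include/buffer.h"

void superblock_roundtrip() {
  librbd::cache::pwl::ssd::SuperBlock sb;
  sb.root.block_size = 4096;           // values mirrored from
  sb.root.pool_size = 10737418240;     // generate_test_instances()

  ceph::bufferlist bl;
  using ceph::encode;
  using ceph::decode;
  encode(sb, bl);                      // provided via WRITE_CLASS_DENC

  librbd::cache::pwl::ssd::SuperBlock out;
  auto it = bl.cbegin();
  decode(out, it);                     // out.root now matches sb.root
}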
null
ceph-main/src/librbd/cache/pwl/ssd/WriteLog.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_WRITE_LOG
#define CEPH_LIBRBD_CACHE_PWL_SSD_WRITE_LOG

#include "blk/BlockDevice.h"
#include "common/AsyncOpTracker.h"
#include "common/Checksummer.h"
#include "common/environment.h"
#include "common/RWLock.h"
#include "common/WorkQueue.h"
#include "librbd/BlockGuard.h"
#include "librbd/Utils.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/Types.h"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#include "librbd/cache/pwl/LogMap.h"
#include "librbd/cache/pwl/LogOperation.h"
#include "librbd/cache/pwl/Request.h"
#include "librbd/cache/pwl/ssd/Builder.h"
#include "librbd/cache/pwl/ssd/Types.h"
#include <functional>
#include <list>

namespace librbd {

struct ImageCtx;

namespace cache {
namespace pwl {
namespace ssd {

template <typename ImageCtxT>
class WriteLog : public AbstractWriteLog<ImageCtxT> {
public:
  WriteLog(ImageCtxT &image_ctx,
           librbd::cache::pwl::ImageCacheState<ImageCtxT>* cache_state,
           cache::ImageWritebackInterface& image_writeback,
           plugin::Api<ImageCtxT>& plugin_api);
  ~WriteLog();
  WriteLog(const WriteLog&) = delete;
  WriteLog &operator=(const WriteLog&) = delete;

  typedef io::Extent Extent;
  using This = AbstractWriteLog<ImageCtxT>;
  using C_BlockIORequestT = pwl::C_BlockIORequest<This>;
  using C_WriteRequestT = pwl::C_WriteRequest<This>;
  using C_WriteSameRequestT = pwl::C_WriteSameRequest<This>;

  bool alloc_resources(C_BlockIORequestT *req) override;
  void setup_schedule_append(
      pwl::GenericLogOperationsVector &ops, bool do_early_flush,
      C_BlockIORequestT *req) override;
  void complete_user_request(Context *&user_req, int r) override;

protected:
  using AbstractWriteLog<ImageCtxT>::m_lock;
  using AbstractWriteLog<ImageCtxT>::m_log_entries;
  using AbstractWriteLog<ImageCtxT>::m_image_ctx;
  using AbstractWriteLog<ImageCtxT>::m_cache_state;
  using AbstractWriteLog<ImageCtxT>::m_first_free_entry;
  using AbstractWriteLog<ImageCtxT>::m_first_valid_entry;
  using AbstractWriteLog<ImageCtxT>::m_bytes_allocated;

  bool initialize_pool(Context *on_finish,
                       pwl::DeferredContexts &later) override;
  void process_work() override;
  void append_scheduled_ops(void) override;
  void schedule_append_ops(pwl::GenericLogOperations &ops,
                           C_BlockIORequestT *req) override;
  void remove_pool_file() override;
  void release_ram(std::shared_ptr<GenericLogEntry> log_entry) override;

private:
  class AioTransContext {
  public:
    Context *on_finish;
    ::IOContext ioc;
    explicit AioTransContext(CephContext* cct, Context *cb)
      : on_finish(cb), ioc(cct, this) {}

    ~AioTransContext() {}

    void aio_finish() {
      on_finish->complete(ioc.get_return_value());
      delete this;
    }
  }; // class AioTransContext

  struct WriteLogPoolRootUpdate {
    std::shared_ptr<pwl::WriteLogPoolRoot> root;
    Context *ctx;
    WriteLogPoolRootUpdate(std::shared_ptr<pwl::WriteLogPoolRoot> r,
                           Context* c)
      : root(r), ctx(c) {}
  };

  using WriteLogPoolRootUpdateList =
    std::list<std::shared_ptr<WriteLogPoolRootUpdate>>;
  WriteLogPoolRootUpdateList m_poolroot_to_update; /* pool root list to update to SSD */
  bool m_updating_pool_root = false;

  std::atomic<int> m_async_update_superblock = {0};
  BlockDevice *bdev = nullptr;
  pwl::WriteLogPoolRoot pool_root;
  Builder<This> *m_builderobj;

  Builder<This>* create_builder();
  int create_and_open_bdev();
  void load_existing_entries(pwl::DeferredContexts &later);
  void inc_allocated_cached_bytes(
      std::shared_ptr<pwl::GenericLogEntry> log_entry) override;
  void collect_read_extents(
      uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, uint64_t entry_hit_length,
      Extent hit_extent, pwl::C_ReadRequest *read_ctx) override;
  void complete_read(
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, Context *ctx) override;
  void enlist_op_appender();
  bool retire_entries(const unsigned long int frees_per_tx);
  bool has_sync_point_logs(GenericLogOperations &ops);
  void append_op_log_entries(GenericLogOperations &ops);
  void alloc_op_log_entries(GenericLogOperations &ops);
  void construct_flush_entries(pwl::GenericLogEntries entries_to_flush,
                               DeferredContexts &post_unlock,
                               bool has_write_entry) override;
  void append_ops(GenericLogOperations &ops, Context *ctx,
                  uint64_t* new_first_free_entry);
  void write_log_entries(GenericLogEntriesVector log_entries,
                         AioTransContext *aio, uint64_t *pos);
  void schedule_update_root(std::shared_ptr<WriteLogPoolRoot> root,
                            Context *ctx);
  void enlist_op_update_root();
  void update_root_scheduled_ops();
  int update_pool_root_sync(std::shared_ptr<pwl::WriteLogPoolRoot> root);
  void update_pool_root(std::shared_ptr<WriteLogPoolRoot> root,
                        AioTransContext *aio);
  void aio_read_data_block(std::shared_ptr<GenericWriteLogEntry> log_entry,
                           bufferlist *bl, Context *ctx);
  void aio_read_data_blocks(
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries,
      std::vector<bufferlist *> &bls, Context *ctx);
  static void aio_cache_cb(void *priv, void *priv2) {
    AioTransContext *c = static_cast<AioTransContext*>(priv2);
    c->aio_finish();
  }
}; // class WriteLog

} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd

extern template class librbd::cache::pwl::ssd::WriteLog<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CACHE_PWL_SSD_WRITE_LOG
5,926
36.751592
92
h
null
ceph-main/src/librbd/crypto/BlockCrypto.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_BLOCK_CRYPTO_H
#define CEPH_LIBRBD_CRYPTO_BLOCK_CRYPTO_H

#include "include/Context.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/openssl/DataCryptor.h"

namespace librbd {
namespace crypto {

template <typename T>
class BlockCrypto : public CryptoInterface {
public:
  static BlockCrypto* create(CephContext* cct, DataCryptor<T>* data_cryptor,
                             uint32_t block_size, uint64_t data_offset) {
    return new BlockCrypto(cct, data_cryptor, block_size, data_offset);
  }

  BlockCrypto(CephContext* cct, DataCryptor<T>* data_cryptor,
              uint64_t block_size, uint64_t data_offset);
  ~BlockCrypto();

  int encrypt(ceph::bufferlist* data, uint64_t image_offset) override;
  int decrypt(ceph::bufferlist* data, uint64_t image_offset) override;

  uint64_t get_block_size() const override {
    return m_block_size;
  }

  uint64_t get_data_offset() const override {
    return m_data_offset;
  }

  const unsigned char* get_key() const override {
    return m_data_cryptor->get_key();
  }

  int get_key_length() const override {
    return m_data_cryptor->get_key_length();
  }

private:
  CephContext* m_cct;
  DataCryptor<T>* m_data_cryptor;
  uint64_t m_block_size;
  uint64_t m_data_offset;
  uint32_t m_iv_size;

  int crypt(ceph::bufferlist* data, uint64_t image_offset, CipherMode mode);
};

} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::BlockCrypto<EVP_CIPHER_CTX>;

#endif // CEPH_LIBRBD_CRYPTO_BLOCK_CRYPTO_H
1,693
26.770492
78
h
null
ceph-main/src/librbd/crypto/CryptoContextPool.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_CONTEXT_POOL_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_CONTEXT_POOL_H

#include "librbd/crypto/DataCryptor.h"
#include "include/ceph_assert.h"
#include <boost/lockfree/queue.hpp>

namespace librbd {
namespace crypto {

template <typename T>
class CryptoContextPool : public DataCryptor<T> {
public:
  CryptoContextPool(DataCryptor<T>* data_cryptor, uint32_t pool_size);
  ~CryptoContextPool();

  T* get_context(CipherMode mode) override;
  void return_context(T* ctx, CipherMode mode) override;

  inline uint32_t get_block_size() const override {
    return m_data_cryptor->get_block_size();
  }
  inline uint32_t get_iv_size() const override {
    return m_data_cryptor->get_iv_size();
  }
  inline int get_key_length() const override {
    return m_data_cryptor->get_key_length();
  }
  inline const unsigned char* get_key() const override {
    return m_data_cryptor->get_key();
  }
  inline int init_context(T* ctx, const unsigned char* iv,
                          uint32_t iv_length) const override {
    return m_data_cryptor->init_context(ctx, iv, iv_length);
  }
  inline int update_context(T* ctx, const unsigned char* in,
                            unsigned char* out,
                            uint32_t len) const override {
    return m_data_cryptor->update_context(ctx, in, out, len);
  }

  using ContextQueue = boost::lockfree::queue<T*>;

private:
  DataCryptor<T>* m_data_cryptor;
  ContextQueue m_encrypt_contexts;
  ContextQueue m_decrypt_contexts;

  inline ContextQueue& get_contexts(CipherMode mode) {
    switch (mode) {
      case CIPHER_MODE_ENC:
        return m_encrypt_contexts;
      case CIPHER_MODE_DEC:
        return m_decrypt_contexts;
      default:
        ceph_assert(false);
    }
  }
};

} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_CONTEXT_POOL_H
2,036
28.521739
72
h
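The pool keeps one boost::lockfree::queue per cipher direction so concurrent I/O threads can borrow and return cipher contexts without taking a lock. A minimal, self-contained sketch of that borrow/return discipline follows; it illustrates the pattern only, with a hypothetical Ctx type rather than the actual librbd internals.

#include <boost/lockfree/queue.hpp>

struct Ctx { int id; };  // hypothetical stand-in for a cipher context

int main() {
  boost::lockfree::queue<Ctx*> pool(16);  // fixed capacity, like pool_size
  for (int i = 0; i < 16; ++i) {
    pool.push(new Ctx{i});
  }

  Ctx* ctx = nullptr;
  if (!pool.pop(ctx)) {       // pool exhausted: caller allocates a fresh one
    ctx = new Ctx{-1};
  }
  // ... use ctx for one encrypt or decrypt operation ...
  if (!pool.push(ctx)) {      // pool full again: surplus context is freed
    delete ctx;
  }

  while (pool.pop(ctx)) {     // drain on shutdown
    delete ctx;
  }
  return 0;
}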
null
ceph-main/src/librbd/crypto/CryptoImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_IMAGE_DISPATCH_H

#include "librbd/io/ImageDispatchInterface.h"

namespace librbd {
namespace crypto {

class CryptoImageDispatch : public io::ImageDispatchInterface {
public:
  static CryptoImageDispatch* create(uint64_t data_offset) {
    return new CryptoImageDispatch(data_offset);
  }
  CryptoImageDispatch(uint64_t data_offset);

  io::ImageDispatchLayer get_dispatch_layer() const override {
    return io::IMAGE_DISPATCH_LAYER_CRYPTO;
  }

  void shut_down(Context* on_finish) override {
    on_finish->complete(0);
  }

  bool read(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      io::ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool discard(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      uint32_t discard_granularity_bytes,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool write_same(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool compare_and_write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool flush(
      io::AioCompletion* aio_comp, io::FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool list_snaps(
      io::AioCompletion* aio_comp, io::Extents&& image_extents,
      io::SnapIds&& snap_ids, int list_snaps_flags,
      io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

  // called directly by ImageDispatcher
  // TODO: hoist these out and remove CryptoImageDispatch since it's
  // just a placeholder
  void remap_to_physical(io::Extents& image_extents, io::ImageArea area);
  io::ImageArea remap_to_logical(io::Extents& image_extents);

private:
  uint64_t m_data_offset;
};

} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_IMAGE_DISPATCH_H
3,772
32.6875
80
h
null
ceph-main/src/librbd/crypto/CryptoInterface.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_INTERFACE_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_INTERFACE_H

#include "include/buffer.h"
#include "include/intarith.h"
#include "librbd/io/Types.h"

namespace librbd {
namespace crypto {

class CryptoInterface {
public:
  virtual ~CryptoInterface() = default;

  virtual int encrypt(ceph::bufferlist* data, uint64_t image_offset) = 0;
  virtual int decrypt(ceph::bufferlist* data, uint64_t image_offset) = 0;
  virtual uint64_t get_block_size() const = 0;
  virtual uint64_t get_data_offset() const = 0;
  virtual const unsigned char* get_key() const = 0;
  virtual int get_key_length() const = 0;

  inline std::pair<uint64_t, uint64_t> get_pre_and_post_align(
      uint64_t off, uint64_t len) {
    if (len == 0) {
      return std::make_pair(0, 0);
    }
    auto block_size = get_block_size();
    return std::make_pair(p2phase(off, block_size),
                          p2nphase(off + len, block_size));
  }

  inline std::pair<uint64_t, uint64_t> align(uint64_t off, uint64_t len) {
    auto aligns = get_pre_and_post_align(off, len);
    return std::make_pair(off - aligns.first,
                          len + aligns.first + aligns.second);
  }

  inline bool is_aligned(uint64_t off, uint64_t len) {
    auto aligns = get_pre_and_post_align(off, len);
    return aligns.first == 0 && aligns.second == 0;
  }

  inline bool is_aligned(const io::ReadExtents& extents) {
    for (const auto& extent: extents) {
      if (!is_aligned(extent.offset, extent.length)) {
        return false;
      }
    }
    return true;
  }

  inline void align_extents(const io::ReadExtents& extents,
                            io::ReadExtents* aligned_extents) {
    for (const auto& extent: extents) {
      auto aligned = align(extent.offset, extent.length);
      aligned_extents->emplace_back(aligned.first, aligned.second);
    }
  }

  inline int decrypt_aligned_extent(io::ReadExtent& extent,
                                    uint64_t image_offset) {
    if (extent.length == 0 || extent.bl.length() == 0) {
      return 0;
    }

    if (extent.extent_map.empty()) {
      extent.extent_map.emplace_back(extent.offset, extent.bl.length());
    }

    ceph::bufferlist result_bl;
    io::Extents result_extent_map;

    ceph::bufferlist curr_block_bl;
    auto curr_offset = extent.offset;
    auto curr_block_start_offset = curr_offset;
    auto curr_block_end_offset = curr_offset;

    // this will add a final loop iteration for decrypting the last extent
    extent.extent_map.emplace_back(
        extent.offset + extent.length + get_block_size(), 0);

    for (auto [off, len]: extent.extent_map) {
      auto [aligned_off, aligned_len] = align(off, len);
      if (aligned_off > curr_block_end_offset) {
        curr_block_bl.append_zero(curr_block_end_offset - curr_offset);
        auto curr_block_length = curr_block_bl.length();
        if (curr_block_length > 0) {
          auto r = decrypt(
              &curr_block_bl,
              image_offset + curr_block_start_offset - extent.offset);
          if (r != 0) {
            return r;
          }

          curr_block_bl.splice(0, curr_block_length, &result_bl);
          result_extent_map.emplace_back(
              curr_block_start_offset, curr_block_length);
        }

        curr_block_start_offset = aligned_off;
        curr_block_end_offset = aligned_off + aligned_len;
        curr_offset = aligned_off;
      }

      curr_block_bl.append_zero(off - curr_offset);
      extent.bl.splice(0, len, &curr_block_bl);
      curr_offset = off + len;
      curr_block_end_offset = aligned_off + aligned_len;
    }

    extent.bl = std::move(result_bl);
    extent.extent_map = std::move(result_extent_map);

    return 0;
  }
};

} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_INTERFACE_H
3,953
30.380952
74
h
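The alignment helpers in CryptoInterface are plain power-of-two modular arithmetic: p2phase(off, bs) is the offset within its crypto block, and p2nphase(off + len, bs) is the distance from the end of the range to the next block boundary, so align() widens a byte range to whole blocks. A standalone worked example follows; the two helpers are reimplemented locally (Ceph's intarith.h is in-tree only) and are valid for power-of-two alignments.

#include <cstdint>
#include <cstdio>

// Local stand-ins for Ceph's p2phase()/p2nphase(); power-of-two align only.
static uint64_t p2phase(uint64_t x, uint64_t align) { return x & (align - 1); }
static uint64_t p2nphase(uint64_t x, uint64_t align) { return -x & (align - 1); }

int main() {
  const uint64_t block_size = 4096;
  const uint64_t off = 5000, len = 1000;     // read range [5000, 6000)

  uint64_t pre  = p2phase(off, block_size);         // 904
  uint64_t post = p2nphase(off + len, block_size);  // 2192

  // align(): widen [5000, 6000) to the enclosing block range [4096, 8192)
  uint64_t aligned_off = off - pre;                 // 4096
  uint64_t aligned_len = len + pre + post;          // 4096

  printf("pre=%llu post=%llu -> [%llu, +%llu)\n",
         (unsigned long long)pre, (unsigned long long)post,
         (unsigned long long)aligned_off, (unsigned long long)aligned_len);
  return 0;
}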
null
ceph-main/src/librbd/crypto/CryptoObjectDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_OBJECT_DISPATCH_H

#include "librbd/crypto/CryptoInterface.h"
#include "librbd/io/Types.h"
#include "librbd/io/ObjectDispatchInterface.h"

namespace librbd {

struct ImageCtx;

namespace crypto {

template <typename ImageCtxT = librbd::ImageCtx>
class CryptoObjectDispatch : public io::ObjectDispatchInterface {
public:
  static CryptoObjectDispatch* create(
      ImageCtxT* image_ctx, CryptoInterface* crypto) {
    return new CryptoObjectDispatch(image_ctx, crypto);
  }

  CryptoObjectDispatch(ImageCtxT* image_ctx, CryptoInterface* crypto);

  io::ObjectDispatchLayer get_dispatch_layer() const override {
    return io::OBJECT_DISPATCH_LAYER_CRYPTO;
  }

  void shut_down(Context* on_finish) override;

  bool read(
      uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
      int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
      uint64_t* version, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool discard(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      IOContext io_context, int discard_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
      IOContext io_context, int op_flags, int write_flags,
      std::optional<uint64_t> assert_version,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool write_same(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
      IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override;

  bool compare_and_write(
      uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
      ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
      const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
      int* object_dispatch_flags, uint64_t* journal_tid,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool flush(
      io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
      uint64_t* journal_tid, io::DispatchResult* dispatch_result,
      Context** on_finish, Context* on_dispatched) override {
    return false;
  }

  bool list_snaps(
      uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
      int list_snap_flags, const ZTracer::Trace &parent_trace,
      io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

  bool reset_existence_cache(Context* on_finish) override {
    return false;
  }

  void extent_overwritten(
      uint64_t object_no, uint64_t object_off, uint64_t object_len,
      uint64_t journal_tid, uint64_t new_journal_tid) override {
  }

  int prepare_copyup(
      uint64_t object_no,
      io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override;

private:
  ImageCtxT* m_image_ctx;
  CryptoInterface* m_crypto;
  uint64_t m_data_offset_object_no;
};

} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::CryptoObjectDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_OBJECT_DISPATCH_H
4,094
34.301724
77
h
null
ceph-main/src/librbd/crypto/DataCryptor.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_DATA_CRYPTOR_H
#define CEPH_LIBRBD_CRYPTO_DATA_CRYPTOR_H

#include "include/int_types.h"
#include "librbd/crypto/Types.h"

namespace librbd {
namespace crypto {

template <typename T>
class DataCryptor {
public:
  virtual ~DataCryptor() = default;

  virtual uint32_t get_block_size() const = 0;
  virtual uint32_t get_iv_size() const = 0;
  virtual const unsigned char* get_key() const = 0;
  virtual int get_key_length() const = 0;

  virtual T* get_context(CipherMode mode) = 0;
  virtual void return_context(T* ctx, CipherMode mode) = 0;

  virtual int init_context(T* ctx, const unsigned char* iv,
                           uint32_t iv_length) const = 0;
  virtual int update_context(T* ctx, const unsigned char* in,
                             unsigned char* out, uint32_t len) const = 0;
};

} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_DATA_CRYPTOR_H
1,018
25.815789
73
h
null
ceph-main/src/librbd/crypto/EncryptionFormat.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_ENCRYPTION_FORMAT_H
#define CEPH_LIBRBD_CRYPTO_ENCRYPTION_FORMAT_H

#include <memory>

struct Context;

namespace librbd {
namespace crypto {

struct CryptoInterface;

template <typename ImageCtxT>
struct EncryptionFormat {
  virtual ~EncryptionFormat() {
  }

  virtual std::unique_ptr<EncryptionFormat<ImageCtxT>> clone() const = 0;
  virtual void format(ImageCtxT* ictx, Context* on_finish) = 0;
  virtual void load(ImageCtxT* ictx, std::string* detected_format_name,
                    Context* on_finish) = 0;
  virtual void flatten(ImageCtxT* ictx, Context* on_finish) = 0;

  virtual CryptoInterface* get_crypto() = 0;
};

} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_ENCRYPTION_FORMAT_H
851
24.058824
73
h
null
ceph-main/src/librbd/crypto/FormatRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_FORMAT_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_FORMAT_REQUEST_H

#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"

struct Context;

namespace librbd {

class ImageCtx;

namespace crypto {

template <typename I>
class FormatRequest {
public:
  using EncryptionFormat = decltype(I::encryption_format);

  static FormatRequest* create(
      I* image_ctx, EncryptionFormat format, Context* on_finish) {
    return new FormatRequest(image_ctx, std::move(format), on_finish);
  }

  FormatRequest(I* image_ctx, EncryptionFormat format, Context* on_finish);
  void send();
  void handle_shutdown_crypto(int r);
  void format();
  void handle_format(int r);
  void flush();
  void handle_flush(int r);
  void finish(int r);

private:
  I* m_image_ctx;
  EncryptionFormat m_format;
  Context* m_on_finish;
};

} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::FormatRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CRYPTO_FORMAT_REQUEST_H
1,139
21.8
77
h
null
ceph-main/src/librbd/crypto/LoadRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_LOAD_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_LOAD_REQUEST_H

#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"

struct Context;

namespace librbd {

class ImageCtx;

namespace crypto {

template <typename I>
class LoadRequest {
public:
  using EncryptionFormat = decltype(I::encryption_format);

  static constexpr char UNKNOWN_FORMAT[] = "<unknown>";

  static LoadRequest* create(
      I* image_ctx, std::vector<EncryptionFormat>&& formats,
      Context* on_finish) {
    return new LoadRequest(image_ctx, std::move(formats), on_finish);
  }

  LoadRequest(I* image_ctx, std::vector<EncryptionFormat>&& formats,
              Context* on_finish);
  void send();
  void flush();
  void handle_flush(int r);
  void load();
  void handle_load(int r);
  void invalidate_cache();
  void handle_invalidate_cache(int r);
  void finish(int r);

private:
  I* m_image_ctx;
  Context* m_on_finish;

  size_t m_format_idx;
  bool m_is_current_format_cloned;
  std::vector<EncryptionFormat> m_formats;

  I* m_current_image_ctx;
  std::string m_detected_format_name;
};

} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::LoadRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CRYPTO_LOAD_REQUEST_H
1,413
22.966102
71
h
null
ceph-main/src/librbd/crypto/ShutDownCryptoRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_SHUT_DOWN_CRYPTO_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_SHUT_DOWN_CRYPTO_REQUEST_H

#include "librbd/ImageCtx.h"

struct Context;

namespace librbd {

class ImageCtx;

namespace crypto {

template <typename I>
class ShutDownCryptoRequest {
public:
  static ShutDownCryptoRequest* create(I* image_ctx, Context* on_finish) {
    return new ShutDownCryptoRequest(image_ctx, on_finish);
  }

  ShutDownCryptoRequest(I* image_ctx, Context* on_finish);
  void send();
  void shut_down_object_dispatch();
  void handle_shut_down_object_dispatch(int r);
  void shut_down_image_dispatch();
  void handle_shut_down_image_dispatch(int r);
  void finish(int r);

private:
  I* m_image_ctx;
  Context* m_on_finish;
};

} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::ShutDownCryptoRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CRYPTO_SHUT_DOWN_CRYPTO_REQUEST_H
1,036
22.568182
78
h
null
ceph-main/src/librbd/crypto/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_UTILS_H
#define CEPH_LIBRBD_CRYPTO_UTILS_H

#include "include/Context.h"

namespace librbd {

struct ImageCtx;

namespace crypto {

class CryptoInterface;
template <typename> class EncryptionFormat;

namespace util {

template <typename ImageCtxT = librbd::ImageCtx>
void set_crypto(ImageCtxT *image_ctx,
                decltype(ImageCtxT::encryption_format) encryption_format);

int build_crypto(
    CephContext* cct, const unsigned char* key, uint32_t key_length,
    uint64_t block_size, uint64_t data_offset,
    std::unique_ptr<CryptoInterface>* result_crypto);

} // namespace util
} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_UTILS_H
810
22.852941
74
h
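build_crypto() is the convenience entry point that wires an openssl DataCryptor into a block-based CryptoInterface. A hedged call sketch follows; the 64-byte key (AES-256-XTS-style), the 4096-byte block size, and the 4 MiB data offset are illustrative placeholders, not values mandated by this header.

#include "librbd/crypto/Utils.h"
#include "librbd/crypto/CryptoInterface.h"

int make_crypto(CephContext* cct) {
  unsigned char key[64] = {0};  // placeholder key material (assumption)
  std::unique_ptr<librbd::crypto::CryptoInterface> crypto;

  // block_size and data_offset below are example values only.
  int r = librbd::crypto::util::build_crypto(
      cct, key, sizeof(key), 4096 /* block_size */,
      4 * 1024 * 1024 /* data_offset */, &crypto);
  if (r < 0) {
    return r;
  }
  // crypto->encrypt()/decrypt() now operate on block_size-aligned extents.
  return 0;
}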
null
ceph-main/src/librbd/crypto/luks/FormatRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_LUKS_FORMAT_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_LUKS_FORMAT_REQUEST_H

#include <string_view>
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/luks/Header.h"

namespace librbd {

class ImageCtx;

namespace crypto {
namespace luks {

template <typename I>
class FormatRequest {
public:
  static FormatRequest* create(
      I* image_ctx, encryption_format_t format, encryption_algorithm_t alg,
      std::string_view passphrase,
      std::unique_ptr<CryptoInterface>* result_crypto, Context* on_finish,
      bool insecure_fast_mode) {
    return new FormatRequest(image_ctx, format, alg, passphrase,
                             result_crypto, on_finish, insecure_fast_mode);
  }

  FormatRequest(I* image_ctx, encryption_format_t format,
                encryption_algorithm_t alg, std::string_view passphrase,
                std::unique_ptr<CryptoInterface>* result_crypto,
                Context* on_finish, bool insecure_fast_mode);
  void send();
  void finish(int r);

private:
  I* m_image_ctx;

  encryption_format_t m_format;
  encryption_algorithm_t m_alg;
  std::string_view m_passphrase;
  std::unique_ptr<CryptoInterface>* m_result_crypto;
  Context* m_on_finish;
  bool m_insecure_fast_mode;
  Header m_header;

  void handle_write_header(int r);
};

} // namespace luks
} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::luks::FormatRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CRYPTO_LUKS_FORMAT_REQUEST_H
1,732
27.883333
80
h
null
ceph-main/src/librbd/crypto/luks/Header.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_LUKS_HEADER_H
#define CEPH_LIBRBD_CRYPTO_LUKS_HEADER_H

#include <libcryptsetup.h>
#include "common/ceph_context.h"
#include "include/buffer.h"

namespace librbd {
namespace crypto {
namespace luks {

class Header {
public:
  Header(CephContext* cct);
  ~Header();
  int init();

  int write(const ceph::bufferlist& bl);
  ssize_t read(ceph::bufferlist* bl);

  int format(const char* type, const char* alg, const char* key,
             size_t key_size, const char* cipher_mode, uint32_t sector_size,
             uint32_t data_alignment, bool insecure_fast_mode);
  int add_keyslot(const char* passphrase, size_t passphrase_size);
  int load(const char* type);
  int read_volume_key(const char* passphrase, size_t passphrase_size,
                      char* volume_key, size_t* volume_key_size);

  int get_sector_size();
  uint64_t get_data_offset();
  const char* get_cipher();
  const char* get_cipher_mode();
  const char* get_format_name();

private:
  void libcryptsetup_log(int level, const char* msg);
  static void libcryptsetup_log_wrapper(int level, const char* msg,
                                        void* header);

  CephContext* m_cct;
  int m_fd;
  struct crypt_device *m_cd;
};

} // namespace luks
} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_LUKS_HEADER_H
1,484
27.018868
78
h
null
ceph-main/src/librbd/crypto/luks/LUKSEncryptionFormat.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_LUKS_ENCRYPTION_FORMAT_H
#define CEPH_LIBRBD_CRYPTO_LUKS_ENCRYPTION_FORMAT_H

#include <string_view>
#include "include/rbd/librbd.hpp"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/EncryptionFormat.h"

namespace librbd {

struct ImageCtx;

namespace crypto {
namespace luks {

template <typename ImageCtxT>
class EncryptionFormat : public crypto::EncryptionFormat<ImageCtxT> {
public:
  void flatten(ImageCtxT* ictx, Context* on_finish) override;

  CryptoInterface* get_crypto() override {
    ceph_assert(m_crypto);
    return m_crypto.get();
  }

protected:
  std::unique_ptr<CryptoInterface> m_crypto;
};

template <typename ImageCtxT>
class LUKSEncryptionFormat : public EncryptionFormat<ImageCtxT> {
public:
  LUKSEncryptionFormat(std::string_view passphrase)
    : m_passphrase(passphrase) {}

  std::unique_ptr<crypto::EncryptionFormat<ImageCtxT>> clone() const override {
    return std::make_unique<LUKSEncryptionFormat>(m_passphrase);
  }

  void format(ImageCtxT* ictx, Context* on_finish) override;
  void load(ImageCtxT* ictx, std::string* detected_format_name,
            Context* on_finish) override;

private:
  std::string_view m_passphrase;
};

template <typename ImageCtxT>
class LUKS1EncryptionFormat : public EncryptionFormat<ImageCtxT> {
public:
  LUKS1EncryptionFormat(encryption_algorithm_t alg,
                        std::string_view passphrase)
    : m_alg(alg), m_passphrase(passphrase) {}

  std::unique_ptr<crypto::EncryptionFormat<ImageCtxT>> clone() const override {
    return std::make_unique<LUKS1EncryptionFormat>(m_alg, m_passphrase);
  }

  void format(ImageCtxT* ictx, Context* on_finish) override;
  void load(ImageCtxT* ictx, std::string* detected_format_name,
            Context* on_finish) override;

private:
  encryption_algorithm_t m_alg;
  std::string_view m_passphrase;
};

template <typename ImageCtxT>
class LUKS2EncryptionFormat : public EncryptionFormat<ImageCtxT> {
public:
  LUKS2EncryptionFormat(encryption_algorithm_t alg,
                        std::string_view passphrase)
    : m_alg(alg), m_passphrase(passphrase) {}

  std::unique_ptr<crypto::EncryptionFormat<ImageCtxT>> clone() const override {
    return std::make_unique<LUKS2EncryptionFormat>(m_alg, m_passphrase);
  }

  void format(ImageCtxT* ictx, Context* on_finish) override;
  void load(ImageCtxT* ictx, std::string* detected_format_name,
            Context* on_finish) override;

private:
  encryption_algorithm_t m_alg;
  std::string_view m_passphrase;
};

} // namespace luks
} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::luks::LUKSEncryptionFormat<
    librbd::ImageCtx>;
extern template class librbd::crypto::luks::LUKS1EncryptionFormat<
    librbd::ImageCtx>;
extern template class librbd::crypto::luks::LUKS2EncryptionFormat<
    librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CRYPTO_LUKS_ENCRYPTION_FORMAT_H
2,993
28.643564
80
h
null
ceph-main/src/librbd/crypto/luks/LoadRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_LUKS_LOAD_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_LUKS_LOAD_REQUEST_H

#include <string_view>
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/luks/Header.h"

namespace librbd {

class ImageCtx;

namespace crypto {
namespace luks {

// maximum header size in LUKS1/2 (excluding keyslots) is 4 MiB
const uint64_t MAXIMUM_HEADER_SIZE = 4 * 1024 * 1024;
// default LUKS2 header size: 2 x 16 KiB metadata areas + 1 x 256 KiB keyslot
const uint64_t DEFAULT_INITIAL_READ_SIZE = 288 * 1024;

template <typename I>
class LoadRequest {
public:
  static LoadRequest* create(
      I* image_ctx, encryption_format_t format, std::string_view passphrase,
      std::unique_ptr<CryptoInterface>* result_crypto,
      std::string* detected_format_name, Context* on_finish) {
    return new LoadRequest(image_ctx, format, passphrase, result_crypto,
                           detected_format_name, on_finish);
  }

  LoadRequest(I* image_ctx, encryption_format_t format,
              std::string_view passphrase,
              std::unique_ptr<CryptoInterface>* result_crypto,
              std::string* detected_format_name, Context* on_finish);
  void send();
  void finish(int r);
  void set_initial_read_size(uint64_t read_size);

private:
  I* m_image_ctx;
  encryption_format_t m_format;
  std::string_view m_passphrase;
  Context* m_on_finish;

  ceph::bufferlist m_bl;
  std::unique_ptr<CryptoInterface>* m_result_crypto;
  std::string* m_detected_format_name;
  uint64_t m_initial_read_size;
  Header m_header;
  uint64_t m_offset;

  void read(uint64_t end_offset, Context* on_finish);
  bool handle_read(int r);
  void handle_read_header(int r);
  void handle_read_keyslots(int r);
  void read_volume_key();
};

} // namespace luks
} // namespace crypto
} // namespace librbd

extern template class librbd::crypto::luks::LoadRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_CRYPTO_LUKS_LOAD_REQUEST_H
2,167
29.111111
74
h
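The 288 KiB DEFAULT_INITIAL_READ_SIZE above comes straight from the comment's arithmetic: two 16 KiB metadata areas plus one 256 KiB keyslot. A compile-time check of that sum:

#include <cstdint>

// 2 x 16 KiB metadata areas + 1 x 256 KiB keyslot = 288 KiB, the default
// amount read before the request has to fetch more of the header.
constexpr std::uint64_t KiB = 1024;
static_assert(2 * 16 * KiB + 256 * KiB == 288 * KiB,
              "matches DEFAULT_INITIAL_READ_SIZE");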
null
ceph-main/src/librbd/crypto/luks/Magic.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_LUKS_MAGIC_H
#define CEPH_LIBRBD_CRYPTO_LUKS_MAGIC_H

#include "common/ceph_context.h"
#include "include/buffer.h"

namespace librbd {
namespace crypto {
namespace luks {

class Magic {
public:
  static int is_luks(ceph::bufferlist& bl);
  static int is_rbd_clone(ceph::bufferlist& bl);
  static int replace_magic(CephContext* cct, ceph::bufferlist& bl);

private:
  static int read(ceph::bufferlist& bl, uint32_t bl_off,
                  uint32_t read_size, char* result);
  static int cmp(ceph::bufferlist& bl, uint32_t bl_off,
                 const std::string& cmp_str);
  static void transform_secondary_header_magic(char* magic);
};

} // namespace luks
} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_LUKS_MAGIC_H
875
25.545455
70
h
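For orientation, the magic handling above builds on well-known on-disk constants: LUKS1/LUKS2 primary headers start with the six bytes "LUKS\xba\xbe", and (as transform_secondary_header_magic hints) the LUKS2 secondary header uses a byte-transformed variant of that prefix. A standalone check over a raw buffer, as a sketch rather than the bufferlist-based implementation above:

#include <cstddef>
#include <cstring>

// Matches the LUKS primary-header magic at the start of a buffer.
bool has_luks_magic(const char* buf, std::size_t len) {
  static const char kMagic[6] = {'L', 'U', 'K', 'S', '\xba', '\xbe'};
  return len >= sizeof(kMagic) &&
         std::memcmp(buf, kMagic, sizeof(kMagic)) == 0;
}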
null
ceph-main/src/librbd/crypto/openssl/DataCryptor.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_CRYPTO_OPENSSL_DATA_CRYPTOR_H
#define CEPH_LIBRBD_CRYPTO_OPENSSL_DATA_CRYPTOR_H

#include "librbd/crypto/DataCryptor.h"
#include "include/Context.h"
#include <openssl/evp.h>

namespace librbd {
namespace crypto {
namespace openssl {

class DataCryptor : public crypto::DataCryptor<EVP_CIPHER_CTX> {
public:
  DataCryptor(CephContext* cct) : m_cct(cct) {}
  ~DataCryptor();

  int init(const char* cipher_name, const unsigned char* key,
           uint16_t key_length);
  uint32_t get_block_size() const override;
  uint32_t get_iv_size() const override;
  const unsigned char* get_key() const override;
  int get_key_length() const override;

  EVP_CIPHER_CTX* get_context(CipherMode mode) override;
  void return_context(EVP_CIPHER_CTX* ctx, CipherMode mode) override;
  int init_context(EVP_CIPHER_CTX* ctx, const unsigned char* iv,
                   uint32_t iv_length) const override;
  int update_context(EVP_CIPHER_CTX* ctx, const unsigned char* in,
                     unsigned char* out, uint32_t len) const override;

private:
  CephContext* m_cct;
  unsigned char* m_key = nullptr;
  uint16_t m_key_size = 0;
  const EVP_CIPHER* m_cipher;
  uint32_t m_iv_size;

  void log_errors() const;
};

} // namespace openssl
} // namespace crypto
} // namespace librbd

#endif // CEPH_LIBRBD_CRYPTO_OPENSSL_DATA_CRYPTOR_H
1,486
28.74
72
h
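The DataCryptor above pools EVP_CIPHER_CTX objects per cipher mode and drives them through init_context/update_context. For orientation, the raw OpenSSL sequence it abstracts looks roughly like this minimal sketch (one-shot context, AES-256-XTS, error reporting and context reuse omitted):

#include <openssl/evp.h>

// Encrypt len bytes with AES-256-XTS. key is 64 bytes (two AES-256 keys);
// iv is 16 bytes, conventionally carrying the sector number for XTS.
int xts_encrypt(const unsigned char* key, const unsigned char* iv,
                const unsigned char* in, unsigned char* out, int len) {
  EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new();
  if (ctx == nullptr) {
    return -1;
  }
  int out_len = 0;
  bool ok = EVP_EncryptInit_ex(ctx, EVP_aes_256_xts(), nullptr, key, iv) == 1 &&
            EVP_EncryptUpdate(ctx, out, &out_len, in, len) == 1;
  EVP_CIPHER_CTX_free(ctx);
  return ok ? 0 : -1;
}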
null
ceph-main/src/librbd/deep_copy/Handler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_HANDLER_H
#define CEPH_LIBRBD_DEEP_COPY_HANDLER_H

#include "include/int_types.h"
#include "include/rbd/librbd.hpp"

namespace librbd {
namespace deep_copy {

struct Handler {
  virtual ~Handler() {}

  virtual void handle_read(uint64_t bytes_read) = 0;

  virtual int update_progress(uint64_t object_number,
                              uint64_t object_count) = 0;
};

struct NoOpHandler : public Handler {
  void handle_read(uint64_t bytes_read) override {
  }

  int update_progress(uint64_t object_number,
                      uint64_t object_count) override {
    return 0;
  }
};

class ProgressHandler : public NoOpHandler {
public:
  ProgressHandler(ProgressContext* progress_ctx)
    : m_progress_ctx(progress_ctx) {
  }

  int update_progress(uint64_t object_number,
                      uint64_t object_count) override {
    return m_progress_ctx->update_progress(object_number, object_count);
  }

private:
  librbd::ProgressContext* m_progress_ctx;
};

} // namespace deep_copy
} // namespace librbd

#endif // CEPH_LIBRBD_DEEP_COPY_HANDLER_H
1,188
22.313725
72
h
null
ceph-main/src/librbd/deep_copy/ImageCopyRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H

#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/bit_vector.hpp"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"
#include <functional>
#include <map>
#include <queue>
#include <set>
#include <vector>
#include <boost/optional.hpp>

class Context;

namespace librbd {

class ImageCtx;

namespace deep_copy {

class Handler;

template <typename ImageCtxT = ImageCtx>
class ImageCopyRequest : public RefCountedObject {
public:
  static ImageCopyRequest* create(ImageCtxT *src_image_ctx,
                                  ImageCtxT *dst_image_ctx,
                                  librados::snap_t src_snap_id_start,
                                  librados::snap_t src_snap_id_end,
                                  librados::snap_t dst_snap_id_start,
                                  bool flatten,
                                  const ObjectNumber &object_number,
                                  const SnapSeqs &snap_seqs,
                                  Handler *handler,
                                  Context *on_finish) {
    return new ImageCopyRequest(src_image_ctx, dst_image_ctx,
                                src_snap_id_start, src_snap_id_end,
                                dst_snap_id_start, flatten, object_number,
                                snap_seqs, handler, on_finish);
  }

  ImageCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                   librados::snap_t src_snap_id_start,
                   librados::snap_t src_snap_id_end,
                   librados::snap_t dst_snap_id_start,
                   bool flatten, const ObjectNumber &object_number,
                   const SnapSeqs &snap_seqs, Handler *handler,
                   Context *on_finish);

  void send();
  void cancel();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * COMPUTE_DIFF
   *    |
   *    |    . . . . .
   *    |    .       . (parallel execution of
   *    v    v       .  multiple objects at once)
   * COPY_OBJECT . . . .
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_src_snap_id_end;
  librados::snap_t m_dst_snap_id_start;
  bool m_flatten;
  ObjectNumber m_object_number;
  SnapSeqs m_snap_seqs;
  Handler *m_handler;
  Context *m_on_finish;

  CephContext *m_cct;
  ceph::mutex m_lock;
  bool m_canceled = false;

  uint64_t m_object_no = 0;
  uint64_t m_end_object_no = 0;
  uint64_t m_current_ops = 0;
  std::priority_queue<
    uint64_t, std::vector<uint64_t>, std::greater<uint64_t>> m_copied_objects;
  bool m_updating_progress = false;
  SnapMap m_snap_map;
  int m_ret_val = 0;
  BitVector<2> m_object_diff_state;

  void map_src_objects(uint64_t dst_object, std::set<uint64_t> *src_objects);

  void compute_diff();
  void handle_compute_diff(int r);

  void send_object_copies();
  void send_next_object_copy();
  void handle_object_copy(uint64_t object_no, int r);

  void finish(int r);
};

} // namespace deep_copy
} // namespace librbd

extern template class librbd::deep_copy::ImageCopyRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
3,503
27.258065
80
h
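m_copied_objects above is a min-heap ordered by object number: object copies run in parallel and complete out of order, while progress must be reported as a contiguous watermark. A hypothetical distillation of that bookkeeping (names are illustrative, not taken from the request itself):

#include <cstdint>
#include <functional>
#include <queue>
#include <vector>

// Tracks out-of-order completions and advances a contiguous watermark.
class InOrderProgress {
 public:
  explicit InOrderProgress(uint64_t first_object) : m_next(first_object) {}

  // Record a completion; returns the new watermark, i.e. every object
  // numbered below the returned value has finished copying.
  uint64_t complete(uint64_t object_no) {
    m_heap.push(object_no);
    while (!m_heap.empty() && m_heap.top() == m_next) {
      m_heap.pop();
      ++m_next;
    }
    return m_next;
  }

 private:
  uint64_t m_next;
  std::priority_queue<uint64_t, std::vector<uint64_t>,
                      std::greater<uint64_t>> m_heap;
};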
null
ceph-main/src/librbd/deep_copy/MetadataCopyRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "librbd/ImageCtx.h"
#include <map>
#include <string>

class Context;

namespace librbd {
namespace deep_copy {

template <typename ImageCtxT = librbd::ImageCtx>
class MetadataCopyRequest {
public:
  static MetadataCopyRequest* create(ImageCtxT *src_image_ctx,
                                     ImageCtxT *dst_image_ctx,
                                     Context *on_finish) {
    return new MetadataCopyRequest(src_image_ctx, dst_image_ctx, on_finish);
  }

  MetadataCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                      Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * LIST_SRC_METADATA <------\
   *    |                     | (repeat if additional
   *    v                     |  metadata)
   * SET_DST_METADATA --------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  typedef std::map<std::string, bufferlist> Metadata;

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  Context *m_on_finish;

  CephContext *m_cct;
  bufferlist m_out_bl;

  std::map<std::string, bufferlist> m_metadata;
  std::string m_last_metadata_key;
  bool m_more_metadata = false;

  void list_src_metadata();
  void handle_list_src_metadata(int r);

  void set_dst_metadata();
  void handle_set_dst_metadata(int r);

  void finish(int r);
};

} // namespace deep_copy
} // namespace librbd

extern template class librbd::deep_copy::MetadataCopyRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
1,854
22.481013
79
h
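The LIST_SRC_METADATA/SET_DST_METADATA cycle above pages through key/value metadata, resuming each pass from the last key seen (m_last_metadata_key) until the source reports no more (m_more_metadata). A synchronous sketch of that pagination pattern, with a hypothetical fetch callback standing in for the asynchronous OSD call:

#include <cstddef>
#include <map>
#include <string>

// fetch(start_after, max_items, &page) fills one page of results and
// returns true while more data remains.
template <typename Fetcher>
std::map<std::string, std::string> list_all(Fetcher fetch,
                                            std::size_t max_items = 128) {
  std::map<std::string, std::string> result;
  std::string last_key;
  bool more = true;
  while (more) {
    std::map<std::string, std::string> page;
    more = fetch(last_key, max_items, &page);
    if (!page.empty()) {
      last_key = page.rbegin()->first;  // resume point for the next pass
    }
    result.insert(page.begin(), page.end());
  }
  return result;
}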
null
ceph-main/src/librbd/deep_copy/ObjectCopyRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H

#include "include/int_types.h"
#include "include/interval_set.h"
#include "include/rados/librados.hpp"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/deep_copy/Types.h"
#include "librbd/io/Types.h"
#include <list>
#include <map>
#include <string>

class Context;
class RWLock;

namespace librbd {

namespace io { class AsyncOperation; }

namespace deep_copy {

struct Handler;

template <typename ImageCtxT = librbd::ImageCtx>
class ObjectCopyRequest {
public:
  static ObjectCopyRequest* create(ImageCtxT *src_image_ctx,
                                   ImageCtxT *dst_image_ctx,
                                   librados::snap_t src_snap_id_start,
                                   librados::snap_t dst_snap_id_start,
                                   const SnapMap &snap_map,
                                   uint64_t object_number, uint32_t flags,
                                   Handler* handler, Context *on_finish) {
    return new ObjectCopyRequest(src_image_ctx, dst_image_ctx,
                                 src_snap_id_start, dst_snap_id_start,
                                 snap_map, object_number, flags, handler,
                                 on_finish);
  }

  ObjectCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                    librados::snap_t src_snap_id_start,
                    librados::snap_t dst_snap_id_start,
                    const SnapMap &snap_map, uint64_t object_number,
                    uint32_t flags, Handler* handler, Context *on_finish);

  void send();

  // testing support
  inline librados::IoCtx &get_src_io_ctx() {
    return m_src_io_ctx;
  }
  inline librados::IoCtx &get_dst_io_ctx() {
    return m_dst_io_ctx;
  }

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * LIST_SNAPS
   *    |
   *    |/---------\
   *    |          | (repeat for each snapshot)
   *    v          |
   * READ ---------/
   *    |
   *    |     /-----------\
   *    |     |           | (repeat for each snapshot)
   *    v     v           |
   * UPDATE_OBJECT_MAP ---/ (skip if object
   *    |                    map disabled)
   *    |     /-----------\
   *    |     |           | (repeat for each snapshot)
   *    v     v           |
   * WRITE_OBJECT --------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  struct ReadOp {
    interval_set<uint64_t> image_interval;
    io::Extents image_extent_map;
    bufferlist out_bl;
  };

  typedef std::pair<librados::snap_t, librados::snap_t> WriteReadSnapIds;

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  CephContext *m_cct;
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_dst_snap_id_start;
  SnapMap m_snap_map;
  uint64_t m_dst_object_number;
  uint32_t m_flags;
  Handler* m_handler;
  Context *m_on_finish;

  decltype(m_src_image_ctx->data_ctx) m_src_io_ctx;
  decltype(m_dst_image_ctx->data_ctx) m_dst_io_ctx;
  std::string m_dst_oid;

  io::Extents m_image_extents;
  io::ImageArea m_image_area = io::ImageArea::DATA;

  io::SnapshotDelta m_snapshot_delta;

  std::map<WriteReadSnapIds, ReadOp> m_read_ops;
  std::list<WriteReadSnapIds> m_read_snaps;
  io::SnapshotSparseBufferlist m_snapshot_sparse_bufferlist;

  std::map<librados::snap_t, interval_set<uint64_t>> m_dst_data_interval;
  std::map<librados::snap_t, interval_set<uint64_t>> m_dst_zero_interval;
  std::map<librados::snap_t, uint8_t> m_dst_object_state;
  std::map<librados::snap_t, bool> m_dst_object_may_exist;

  io::AsyncOperation* m_src_async_op = nullptr;

  void send_list_snaps();
  void handle_list_snaps(int r);

  void send_read();
  void handle_read(int r);

  void send_update_object_map();
  void handle_update_object_map(int r);

  void process_copyup();
  void send_write_object();
  void handle_write_object(int r);

  Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);

  void compute_read_ops();
  void merge_write_ops();
  void compute_zero_ops();

  void compute_dst_object_may_exist();

  void finish(int r);
};

} // namespace deep_copy
} // namespace librbd

extern template class librbd::deep_copy::ObjectCopyRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
4,396
26.654088
80
h
null
ceph-main/src/librbd/deep_copy/SetHeadRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H

#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include <map>
#include <set>
#include <string>
#include <tuple>

class Context;

namespace librbd {
namespace deep_copy {

template <typename ImageCtxT = librbd::ImageCtx>
class SetHeadRequest {
public:
  static SetHeadRequest* create(ImageCtxT *image_ctx, uint64_t size,
                                const cls::rbd::ParentImageSpec &parent_spec,
                                uint64_t parent_overlap,
                                Context *on_finish) {
    return new SetHeadRequest(image_ctx, size, parent_spec, parent_overlap,
                              on_finish);
  }

  SetHeadRequest(ImageCtxT *image_ctx, uint64_t size,
                 const cls::rbd::ParentImageSpec &parent_spec,
                 uint64_t parent_overlap, Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v (skip if not needed)
   * SET_SIZE
   *    |
   *    v (skip if not needed)
   * DETACH_PARENT
   *    |
   *    v (skip if not needed)
   * ATTACH_PARENT
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_image_ctx;
  uint64_t m_size;
  cls::rbd::ParentImageSpec m_parent_spec;
  uint64_t m_parent_overlap;
  Context *m_on_finish;

  CephContext *m_cct;

  void send_set_size();
  void handle_set_size(int r);

  void send_detach_parent();
  void handle_detach_parent(int r);

  void send_attach_parent();
  void handle_attach_parent(int r);

  Context *start_lock_op(int* r);

  void finish(int r);
};

} // namespace deep_copy
} // namespace librbd

extern template class librbd::deep_copy::SetHeadRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
2,011
21.863636
77
h
null
ceph-main/src/librbd/deep_copy/SnapshotCopyRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H

#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/RefCountedObj.h"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include <map>
#include <set>
#include <string>
#include <tuple>

class Context;

namespace librbd {

namespace asio { struct ContextWQ; }

namespace deep_copy {

template <typename ImageCtxT = librbd::ImageCtx>
class SnapshotCopyRequest : public RefCountedObject {
public:
  static SnapshotCopyRequest* create(ImageCtxT *src_image_ctx,
                                     ImageCtxT *dst_image_ctx,
                                     librados::snap_t src_snap_id_start,
                                     librados::snap_t src_snap_id_end,
                                     librados::snap_t dst_snap_id_start,
                                     bool flatten, asio::ContextWQ *work_queue,
                                     SnapSeqs *snap_seqs, Context *on_finish) {
    return new SnapshotCopyRequest(src_image_ctx, dst_image_ctx,
                                   src_snap_id_start, src_snap_id_end,
                                   dst_snap_id_start, flatten, work_queue,
                                   snap_seqs, on_finish);
  }

  SnapshotCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                      librados::snap_t src_snap_id_start,
                      librados::snap_t src_snap_id_end,
                      librados::snap_t dst_snap_id_start,
                      bool flatten, asio::ContextWQ *work_queue,
                      SnapSeqs *snap_seqs, Context *on_finish);

  void send();
  void cancel();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * UNPROTECT_SNAP ----/
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * REMOVE_SNAP -------/
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * CREATE_SNAP -------/
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * PROTECT_SNAP ------/
   *    |
   *    v
   * SET_HEAD (skip if not needed)
   *    |
   *    v
   * RESIZE_OBJECT_MAP (skip if not needed)
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  typedef std::set<librados::snap_t> SnapIdSet;

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_src_snap_id_end;
  librados::snap_t m_dst_snap_id_start;
  bool m_flatten;
  asio::ContextWQ *m_work_queue;
  SnapSeqs *m_snap_seqs_result;
  SnapSeqs m_snap_seqs;
  Context *m_on_finish;

  CephContext *m_cct;
  SnapIdSet m_src_snap_ids;
  SnapIdSet m_dst_snap_ids;
  librados::snap_t m_prev_snap_id = CEPH_NOSNAP;

  std::string m_snap_name;
  cls::rbd::SnapshotNamespace m_snap_namespace;

  cls::rbd::ParentImageSpec m_dst_parent_spec;

  ceph::mutex m_lock;
  bool m_canceled = false;

  void send_snap_unprotect();
  void handle_snap_unprotect(int r);

  void send_snap_remove();
  void handle_snap_remove(int r);

  void send_snap_create();
  void handle_snap_create(int r);

  void send_snap_protect();
  void handle_snap_protect(int r);

  void send_set_head();
  void handle_set_head(int r);

  void send_resize_object_map();
  void handle_resize_object_map(int r);

  bool handle_cancellation();

  void error(int r);

  int validate_parent(ImageCtxT *image_ctx, cls::rbd::ParentImageSpec *spec);

  Context *start_lock_op(int* r);
  Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);

  void finish(int r);
};

} // namespace deep_copy
} // namespace librbd

extern template class librbd::deep_copy::SnapshotCopyRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
4,112
26.059211
79
h
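The UNPROTECT/REMOVE/CREATE/PROTECT phases diagrammed above reconcile the destination's snapshot set with the source's. Greatly simplified (the real request matches snapshots via SnapSeqs, names, and namespaces rather than raw ids), the planning step reduces to two set differences:

#include <cstdint>
#include <set>

struct SnapPlan {
  std::set<uint64_t> to_remove;  // on dst but no longer wanted
  std::set<uint64_t> to_create;  // wanted but missing on dst
};

// Hypothetical illustration: diff the desired snapshot set against what the
// destination currently has.
SnapPlan plan_snapshots(const std::set<uint64_t>& wanted,
                        const std::set<uint64_t>& present) {
  SnapPlan plan;
  for (uint64_t id : present) {
    if (wanted.count(id) == 0) {
      plan.to_remove.insert(id);
    }
  }
  for (uint64_t id : wanted) {
    if (present.count(id) == 0) {
      plan.to_create.insert(id);
    }
  }
  return plan;
}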
null
ceph-main/src/librbd/deep_copy/SnapshotCreateRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H

#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include "librbd/internal.h"
#include <map>
#include <set>
#include <string>
#include <tuple>

class Context;

namespace librbd {
namespace deep_copy {

template <typename ImageCtxT = librbd::ImageCtx>
class SnapshotCreateRequest {
public:
  static SnapshotCreateRequest* create(
      ImageCtxT *dst_image_ctx, const std::string &snap_name,
      const cls::rbd::SnapshotNamespace &snap_namespace,
      uint64_t size, const cls::rbd::ParentImageSpec &parent_spec,
      uint64_t parent_overlap, Context *on_finish) {
    return new SnapshotCreateRequest(dst_image_ctx, snap_name, snap_namespace,
                                     size, parent_spec, parent_overlap,
                                     on_finish);
  }

  SnapshotCreateRequest(ImageCtxT *dst_image_ctx,
                        const std::string &snap_name,
                        const cls::rbd::SnapshotNamespace &snap_namespace,
                        uint64_t size,
                        const cls::rbd::ParentImageSpec &parent_spec,
                        uint64_t parent_overlap, Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * SET_HEAD
   *    |
   *    v
   * CREATE_SNAP
   *    |
   *    v (skip if not needed)
   * CREATE_OBJECT_MAP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_dst_image_ctx;
  std::string m_snap_name;
  cls::rbd::SnapshotNamespace m_snap_namespace;
  uint64_t m_size;
  cls::rbd::ParentImageSpec m_parent_spec;
  uint64_t m_parent_overlap;
  Context *m_on_finish;

  CephContext *m_cct;
  NoOpProgressContext m_prog_ctx;

  void send_set_head();
  void handle_set_head(int r);

  void send_create_snap();
  void handle_create_snap(int r);

  void send_create_object_map();
  void handle_create_object_map(int r);

  Context *start_lock_op(int* r);

  void finish(int r);
};

} // namespace deep_copy
} // namespace librbd

extern template class librbd::deep_copy::SnapshotCreateRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
2,564
24.909091
89
h
null
ceph-main/src/librbd/deep_copy/Types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_TYPES_H
#define CEPH_LIBRBD_DEEP_COPY_TYPES_H

#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include <boost/optional.hpp>

namespace librbd {
namespace deep_copy {

enum {
  OBJECT_COPY_REQUEST_FLAG_FLATTEN      = 1U << 0,
  OBJECT_COPY_REQUEST_FLAG_MIGRATION    = 1U << 1,
  OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN = 1U << 2,
};

typedef std::vector<librados::snap_t> SnapIds;
typedef std::map<librados::snap_t, SnapIds> SnapMap;

typedef boost::optional<uint64_t> ObjectNumber;

} // namespace deep_copy
} // namespace librbd

#endif // CEPH_LIBRBD_DEEP_COPY_TYPES_H
720
23.862069
70
h
null
ceph-main/src/librbd/deep_copy/Utils.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_DEEP_COPY_UTILS_H
#define CEPH_LIBRBD_DEEP_COPY_UTILS_H

#include "include/common_fwd.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"

#include <boost/optional.hpp>

namespace librbd {
namespace deep_copy {
namespace util {

void compute_snap_map(CephContext* cct,
                      librados::snap_t src_snap_id_start,
                      librados::snap_t src_snap_id_end,
                      const SnapIds& dst_snap_ids,
                      const SnapSeqs &snap_seqs,
                      SnapMap *snap_map);

} // namespace util
} // namespace deep_copy
} // namespace librbd

#endif // CEPH_LIBRBD_DEEP_COPY_UTILS_H
804
25.833333
70
h
null
ceph-main/src/librbd/exclusive_lock/ImageDispatch.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H

#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/ceph_mutex.h"
#include "common/zipkin_trace.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <atomic>
#include <list>
#include <unordered_set>

struct Context;

namespace librbd {

struct ImageCtx;

namespace io { struct AioCompletion; }

namespace exclusive_lock {

template <typename ImageCtxT>
class ImageDispatch : public io::ImageDispatchInterface {
public:
  static ImageDispatch* create(ImageCtxT* image_ctx) {
    return new ImageDispatch(image_ctx);
  }
  void destroy() {
    delete this;
  }

  ImageDispatch(ImageCtxT* image_ctx);

  io::ImageDispatchLayer get_dispatch_layer() const override {
    return io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK;
  }

  void set_require_lock(bool init_shutdown,
                        io::Direction direction, Context* on_finish);
  void unset_require_lock(io::Direction direction);

  void shut_down(Context* on_finish) override;

  bool read(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      io::ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&bl, int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      io::AioCompletion* aio_comp, io::FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  bool list_snaps(
      io::AioCompletion* aio_comp, io::Extents&& image_extents,
      io::SnapIds&& snap_ids, int list_snaps_flags,
      io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

private:
  typedef std::list<Context*> Contexts;
  typedef std::unordered_set<uint64_t> Tids;

  ImageCtxT* m_image_ctx;
  mutable ceph::shared_mutex m_lock;

  bool m_require_lock_on_read = false;
  bool m_require_lock_on_write = false;

  Contexts m_on_dispatches;

  bool set_require_lock(io::Direction direction, bool enabled);

  bool is_lock_required(bool read_op) const;

  bool needs_exclusive_lock(bool read_op, uint64_t tid,
                            io::DispatchResult* dispatch_result,
                            Context* on_dispatched);

  void handle_acquire_lock(int r);
};

} // namespace exclusive_lock
} // namespace librbd

extern template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
4,518
32.723881
80
h
null
ceph-main/src/librbd/exclusive_lock/Policy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H

namespace librbd {
namespace exclusive_lock {

enum OperationRequestType {
  OPERATION_REQUEST_TYPE_GENERAL = 0,
  OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE = 1,
  OPERATION_REQUEST_TYPE_FORCE_PROMOTION = 2,
};

struct Policy {
  virtual ~Policy() {
  }

  virtual bool may_auto_request_lock() = 0;
  virtual int lock_requested(bool force) = 0;

  virtual bool accept_blocked_request(OperationRequestType) {
    return false;
  }
};

} // namespace exclusive_lock
} // namespace librbd

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H
732
21.90625
70
h
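Policy above is the extension point for exclusive-lock handover behavior. A hypothetical cooperative policy written against this interface, assuming the usual error-code convention where returning 0 from lock_requested lets the handover proceed:

#include "librbd/exclusive_lock/Policy.h"

// Always cooperates with peers: re-acquires the lock automatically when
// needed and accepts every handover request.
struct CooperativePolicy : public librbd::exclusive_lock::Policy {
  bool may_auto_request_lock() override {
    return true;   // transparently re-request the lock on demand
  }
  int lock_requested(bool force) override {
    return 0;      // assumed: 0 => allow release to the requesting client
  }
};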
null
ceph-main/src/librbd/exclusive_lock/PostAcquireRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "librbd/ImageCtx.h"
#include "msg/msg_types.h"
#include <string>

class Context;

namespace librbd {
namespace exclusive_lock {

template <typename ImageCtxT = ImageCtx>
class PostAcquireRequest {
public:
  static PostAcquireRequest* create(ImageCtxT &image_ctx, Context *on_acquire,
                                    Context *on_finish);

  ~PostAcquireRequest();

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |
   *    v
   * REFRESH (skip if not
   *    |      needed)
   *    v
   * OPEN_OBJECT_MAP (skip if
   *    |             disabled)
   *    v
   * OPEN_JOURNAL (skip if
   *    |   *      disabled)
   *    |   *
   *    |   * * * * * * * *
   *    v                 *
   * ALLOCATE_JOURNAL_TAG *
   *    |            *    *
   *    |            *    *
   *    v            *    *
   * PROCESS_PLUGIN_ACQUIRE*
   *    |            *    *
   *    |            *    *
   *    |            v    v
   *    |    PROCESS_PLUGIN_RELEASE
   *    |            |
   *    |            v
   *    |    CLOSE_JOURNAL
   *    |            |
   *    |            v
   *    |    CLOSE_OBJECT_MAP
   *    |            |
   *    v            |
   * <finish> <------/
   *
   * @endverbatim
   */

  PostAcquireRequest(ImageCtxT &image_ctx, Context *on_acquire,
                     Context *on_finish);

  ImageCtxT &m_image_ctx;
  Context *m_on_acquire;
  Context *m_on_finish;

  decltype(m_image_ctx.object_map) m_object_map;
  decltype(m_image_ctx.journal) m_journal;

  bool m_prepare_lock_completed = false;
  int m_error_result;

  void send_refresh();
  void handle_refresh(int r);

  void send_open_journal();
  void handle_open_journal(int r);

  void send_allocate_journal_tag();
  void handle_allocate_journal_tag(int r);

  void send_open_object_map();
  void handle_open_object_map(int r);

  void send_close_journal();
  void handle_close_journal(int r);

  void send_close_object_map();
  void handle_close_object_map(int r);

  void send_process_plugin_acquire_lock();
  void handle_process_plugin_acquire_lock(int r);

  void send_process_plugin_release_lock();
  void handle_process_plugin_release_lock(int r);

  void apply();
  void revert();

  void finish();

  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};

} // namespace exclusive_lock
} // namespace librbd

extern template class librbd::exclusive_lock::PostAcquireRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H
2,910
22.288
83
h
null
ceph-main/src/librbd/exclusive_lock/PreReleaseRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H

#include "librbd/ImageCtx.h"
#include <string>

class AsyncOpTracker;
class Context;

namespace librbd {

struct ImageCtx;

namespace exclusive_lock {

template <typename> struct ImageDispatch;

template <typename ImageCtxT = ImageCtx>
class PreReleaseRequest {
public:
  static PreReleaseRequest* create(ImageCtxT &image_ctx,
                                   ImageDispatch<ImageCtxT>* image_dispatch,
                                   bool shutting_down,
                                   AsyncOpTracker &async_op_tracker,
                                   Context *on_finish);

  ~PreReleaseRequest();

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * CANCEL_OP_REQUESTS
   *    |
   *    v
   * SET_REQUIRE_LOCK
   *    |
   *    v
   * WAIT_FOR_OPS
   *    |
   *    v
   * PREPARE_LOCK
   *    |
   *    v
   * PROCESS_PLUGIN_RELEASE
   *    |
   *    v
   * SHUT_DOWN_IMAGE_CACHE
   *    |
   *    v
   * INVALIDATE_CACHE
   *    |
   *    v
   * FLUSH_IO
   *    |
   *    v
   * FLUSH_NOTIFIES . . . . . . . . . . . . . .
   *    |                                     .
   *    v                                     .
   * CLOSE_JOURNAL                            .
   *    |       (journal disabled,            .
   *    v        object map enabled)          .
   * CLOSE_OBJECT_MAP < . . . . . . . . . . . .
   *    |                                     .
   *    v       (object map disabled)         .
   * <finish> < . . . . . . . . . . . . . . . .
   *
   * @endverbatim
   */

  PreReleaseRequest(ImageCtxT &image_ctx,
                    ImageDispatch<ImageCtxT>* image_dispatch,
                    bool shutting_down, AsyncOpTracker &async_op_tracker,
                    Context *on_finish);

  ImageCtxT &m_image_ctx;
  ImageDispatch<ImageCtxT>* m_image_dispatch;
  bool m_shutting_down;
  AsyncOpTracker &m_async_op_tracker;
  Context *m_on_finish;

  int m_error_result = 0;

  decltype(m_image_ctx.object_map) m_object_map = nullptr;
  decltype(m_image_ctx.journal) m_journal = nullptr;

  void send_cancel_op_requests();
  void handle_cancel_op_requests(int r);

  void send_set_require_lock();
  void handle_set_require_lock(int r);

  void send_wait_for_ops();
  void handle_wait_for_ops(int r);

  void send_prepare_lock();
  void handle_prepare_lock(int r);

  void send_process_plugin_release_lock();
  void handle_process_plugin_release_lock(int r);

  void send_invalidate_cache();
  void handle_invalidate_cache(int r);

  void send_flush_io();
  void handle_flush_io(int r);

  void send_flush_notifies();
  void handle_flush_notifies(int r);

  void send_close_journal();
  void handle_close_journal(int r);

  void send_close_object_map();
  void handle_close_object_map(int r);

  void send_unlock();

  void finish();

  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};

} // namespace exclusive_lock
} // namespace librbd

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
3,252
22.235714
76
h
null
ceph-main/src/librbd/exclusive_lock/StandardPolicy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H

#include "librbd/exclusive_lock/Policy.h"

namespace librbd {

struct ImageCtx;

namespace exclusive_lock {

template <typename ImageCtxT = ImageCtx>
class StandardPolicy : public Policy {
public:
  StandardPolicy(ImageCtxT* image_ctx) : m_image_ctx(image_ctx) {
  }

  bool may_auto_request_lock() override {
    return false;
  }

  int lock_requested(bool force) override;

private:
  ImageCtxT* m_image_ctx;
};

} // namespace exclusive_lock
} // namespace librbd

extern template class librbd::exclusive_lock::StandardPolicy<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
812
20.394737
79
h
null
ceph-main/src/librbd/image/AttachChildRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H
#define CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H

#include "include/common_fwd.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"

class Context;

namespace librbd {

class ImageCtx;

namespace image {

template <typename ImageCtxT = ImageCtx>
class AttachChildRequest {
public:
  static AttachChildRequest* create(ImageCtxT *image_ctx,
                                    ImageCtxT *parent_image_ctx,
                                    const librados::snap_t &parent_snap_id,
                                    ImageCtxT *old_parent_image_ctx,
                                    const librados::snap_t &old_parent_snap_id,
                                    uint32_t clone_format,
                                    Context* on_finish) {
    return new AttachChildRequest(image_ctx, parent_image_ctx, parent_snap_id,
                                  old_parent_image_ctx, old_parent_snap_id,
                                  clone_format, on_finish);
  }

  AttachChildRequest(ImageCtxT *image_ctx,
                     ImageCtxT *parent_image_ctx,
                     const librados::snap_t &parent_snap_id,
                     ImageCtxT *old_parent_image_ctx,
                     const librados::snap_t &old_parent_snap_id,
                     uint32_t clone_format,
                     Context* on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   *                        <start>
   *             (clone v1)    |    (clone v2)
   *        /------------------/\-------------------\
   *        |                                       |
   *        v                                       v
   * V1 ADD CHILD                             V2 SET CLONE
   *        |                                       |
   *        v                                       v
   * V1 VALIDATE PROTECTED                    V2 ATTACH CHILD
   *        |                                       |
   *        v                                       v
   * V1 REMOVE CHILD FROM OLD PARENT   V2 DETACH CHILD FROM OLD PARENT
   *        |                                       |
   *        \------------------\/-------------------/
   *                            |
   *                            v
   *                        <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_image_ctx;
  ImageCtxT *m_parent_image_ctx;
  librados::snap_t m_parent_snap_id;
  ImageCtxT *m_old_parent_image_ctx;
  librados::snap_t m_old_parent_snap_id;
  uint32_t m_clone_format;
  Context* m_on_finish;

  CephContext *m_cct;

  void v1_add_child();
  void handle_v1_add_child(int r);

  void v1_refresh();
  void handle_v1_refresh(int r);

  void v1_remove_child_from_old_parent();
  void handle_v1_remove_child_from_old_parent(int r);

  void v2_set_op_feature();
  void handle_v2_set_op_feature(int r);

  void v2_child_attach();
  void handle_v2_child_attach(int r);

  void v2_child_detach_from_old_parent();
  void handle_v2_child_detach_from_old_parent(int r);

  void finish(int r);
};

} // namespace image
} // namespace librbd

extern template class librbd::image::AttachChildRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H
3,250
29.669811
80
h
null
ceph-main/src/librbd/image/AttachParentRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H
#define CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H

#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"

class Context;

namespace librbd {

class ImageCtx;

namespace image {

template <typename ImageCtxT = ImageCtx>
class AttachParentRequest {
public:
  static AttachParentRequest* create(ImageCtxT& image_ctx,
                                     const cls::rbd::ParentImageSpec& pspec,
                                     uint64_t parent_overlap, bool reattach,
                                     Context* on_finish) {
    return new AttachParentRequest(image_ctx, pspec, parent_overlap,
                                   reattach, on_finish);
  }

  AttachParentRequest(ImageCtxT& image_ctx,
                      const cls::rbd::ParentImageSpec& pspec,
                      uint64_t parent_overlap, bool reattach,
                      Context* on_finish)
    : m_image_ctx(image_ctx), m_parent_image_spec(pspec),
      m_parent_overlap(parent_overlap), m_reattach(reattach),
      m_on_finish(on_finish) {
  }

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |    * * * * * *
   *    |    *         * -EOPNOTSUPP
   *    v    v         *
   *  ATTACH_PARENT * * *
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT& m_image_ctx;
  cls::rbd::ParentImageSpec m_parent_image_spec;
  uint64_t m_parent_overlap;
  bool m_reattach;
  Context* m_on_finish;

  bool m_legacy_parent = false;

  void attach_parent();
  void handle_attach_parent(int r);

  void finish(int r);
};

} // namespace image
} // namespace librbd

extern template class librbd::image::AttachParentRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H
1,961
23.525
78
h
null
ceph-main/src/librbd/image/CloneRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H
#define CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H

#include "cls/rbd/cls_rbd_types.h"
#include "common/config_fwd.h"
#include "librbd/internal.h"
#include "include/rbd/librbd.hpp"

class Context;

using librados::IoCtx;

namespace librbd {

namespace asio { struct ContextWQ; }

namespace image {

template <typename ImageCtxT = ImageCtx>
class CloneRequest {
public:
  static CloneRequest *create(
      ConfigProxy& config, IoCtx& parent_io_ctx,
      const std::string& parent_image_id,
      const std::string& parent_snap_name,
      const cls::rbd::SnapshotNamespace& parent_snap_namespace,
      uint64_t parent_snap_id,
      IoCtx &c_ioctx, const std::string &c_name,
      const std::string &c_id, ImageOptions c_options,
      cls::rbd::MirrorImageMode mirror_image_mode,
      const std::string &non_primary_global_image_id,
      const std::string &primary_mirror_uuid,
      asio::ContextWQ *op_work_queue, Context *on_finish) {
    return new CloneRequest(config, parent_io_ctx, parent_image_id,
                            parent_snap_name, parent_snap_namespace,
                            parent_snap_id, c_ioctx, c_name, c_id, c_options,
                            mirror_image_mode, non_primary_global_image_id,
                            primary_mirror_uuid, op_work_queue, on_finish);
  }

  CloneRequest(ConfigProxy& config, IoCtx& parent_io_ctx,
               const std::string& parent_image_id,
               const std::string& parent_snap_name,
               const cls::rbd::SnapshotNamespace& parent_snap_namespace,
               uint64_t parent_snap_id,
               IoCtx &c_ioctx, const std::string &c_name,
               const std::string &c_id, ImageOptions c_options,
               cls::rbd::MirrorImageMode mirror_image_mode,
               const std::string &non_primary_global_image_id,
               const std::string &primary_mirror_uuid,
               asio::ContextWQ *op_work_queue, Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * OPEN PARENT
   *    |
   *    v
   * VALIDATE CHILD                    <finish>
   *    |                                 ^
   *    v                                 |
   * CREATE CHILD * * * * * * * * * > CLOSE PARENT
   *    |                                 ^
   *    v                                 |
   * OPEN CHILD * * * * * * * * * * > REMOVE CHILD
   *    |                                 ^
   *    v                                 |
   * ATTACH PARENT * * * * * * * * >  CLOSE CHILD
   *    |                                 ^
   *    v                                 *
   * ATTACH CHILD * * * * * * * * * * * * *
   *    |                                 *
   *    v                                 *
   * COPY META DATA * * * * * * * * * * * ^
   *    |                                 *
   *    v (skip if not needed)            *
   * GET MIRROR MODE  * * * * * * * * * * ^
   *    |                                 *
   *    v (skip if not needed)            *
   * SET MIRROR ENABLED * * * * * * * * * *
   *    |
   *    v
   * CLOSE CHILD
   *    |
   *    v
   * CLOSE PARENT
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ConfigProxy& m_config;
  IoCtx &m_parent_io_ctx;
  std::string m_parent_image_id;
  std::string m_parent_snap_name;
  cls::rbd::SnapshotNamespace m_parent_snap_namespace;
  uint64_t m_parent_snap_id;
  ImageCtxT *m_parent_image_ctx;

  IoCtx &m_ioctx;
  std::string m_name;
  std::string m_id;
  ImageOptions m_opts;
  cls::rbd::ParentImageSpec m_pspec;
  ImageCtxT *m_imctx;
  cls::rbd::MirrorMode m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
  cls::rbd::MirrorImageMode m_mirror_image_mode;
  const std::string m_non_primary_global_image_id;
  const std::string m_primary_mirror_uuid;
  NoOpProgressContext m_no_op;
  asio::ContextWQ *m_op_work_queue;
  Context *m_on_finish;

  CephContext *m_cct;
  uint64_t m_clone_format = 2;
  bool m_use_p_features;
  uint64_t m_features;
  bufferlist m_out_bl;
  uint64_t m_size;
  int m_r_saved = 0;

  void validate_options();

  void open_parent();
  void handle_open_parent(int r);

  void validate_parent();

  void validate_child();
  void handle_validate_child(int r);

  void create_child();
  void handle_create_child(int r);

  void open_child();
  void handle_open_child(int r);

  void attach_parent();
  void handle_attach_parent(int r);

  void attach_child();
  void handle_attach_child(int r);

  void copy_metadata();
  void handle_copy_metadata(int r);

  void get_mirror_mode();
  void handle_get_mirror_mode(int r);

  void enable_mirror();
  void handle_enable_mirror(int r);

  void close_child();
  void handle_close_child(int r);

  void remove_child();
  void handle_remove_child(int r);

  void close_parent();
  void handle_close_parent(int r);

  void complete(int r);
};

} // namespace image
} // namespace librbd

extern template class librbd::image::CloneRequest<librbd::ImageCtx>;

#endif // CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H
5,123
27.153846
77
h